tsk               217 arch/alpha/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk               233 arch/alpha/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk               239 arch/alpha/include/asm/mmu_context.h 	if (tsk != current)
tsk               240 arch/alpha/include/asm/mmu_context.h 		task_thread_info(tsk)->pcb.ptbr
tsk               252 arch/alpha/include/asm/mmu_context.h enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk               254 arch/alpha/include/asm/mmu_context.h 	task_thread_info(tsk)->pcb.ptbr
tsk               281 arch/alpha/include/asm/pgtable.h #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
tsk                47 arch/alpha/include/asm/processor.h #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
tsk                49 arch/alpha/include/asm/processor.h #define KSTK_ESP(tsk) \
tsk                50 arch/alpha/include/asm/processor.h   ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
tsk                35 arch/alpha/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                37 arch/alpha/include/asm/thread_info.h 	.task		= &tsk,			\
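A note on the alpha KSTK_ESP() hit above: the *current* task's user stack pointer lives in a machine register (rdusp()), while a sleeping task's USP was spilled into its PCB at context-switch time, so the macro must branch on (tsk) == current. A minimal sketch of how such accessors are typically consumed (report_user_regs is a hypothetical name, not a function in the tree):

	/* Hypothetical helper; KSTK_EIP()/KSTK_ESP() are the real macros. */
	static void report_user_regs(struct task_struct *tsk)
	{
		pr_info("%s[%d]: user pc=%#lx sp=%#lx\n",
			tsk->comm, task_pid_nr(tsk),
			KSTK_EIP(tsk), KSTK_ESP(tsk));
	}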
tsk                16 arch/arc/include/asm/bug.h void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
tsk               212 arch/arc/include/asm/entry.h .macro GET_TSK_STACK_BASE tsk, out
tsk               252 arch/arc/include/asm/entry.h .macro  SET_CURR_TASK_ON_CPU    tsk, tmp
tsk               269 arch/arc/include/asm/entry.h .macro  SET_CURR_TASK_ON_CPU    tsk, tmp
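The two SET_CURR_TASK_ON_CPU definitions above are not stray duplicates: like several repeated-looking pairs later in this listing (ARM's two thread_saved_fp variants, c6x's thread_saved_ksp/thread_saved_dp), they sit in opposite branches of a configuration #ifdef that a flat symbol listing cannot show. A sketch of the shape, assuming the CONFIG_SMP split used in arch/arc/include/asm/entry.h (macro bodies elided):

	#ifdef CONFIG_SMP
	.macro SET_CURR_TASK_ON_CPU tsk, tmp
		/* SMP flavour: publish \tsk as this CPU's current task */
	.endm
	#else
	.macro SET_CURR_TASK_ON_CPU tsk, tmp
		/* UP flavour: a single cached current-task slot suffices */
	.endm
	#endif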
tsk               106 arch/arc/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk               130 arch/arc/include/asm/mmu_context.h 			     struct task_struct *tsk)
tsk               171 arch/arc/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)   do { } while (0)
tsk               173 arch/arc/include/asm/mmu_context.h #define enter_lazy_tlb(mm, tsk)
tsk                78 arch/arc/include/asm/processor.h #define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
tsk                79 arch/arc/include/asm/processor.h #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
tsk                85 arch/arc/include/asm/processor.h #define TSK_K_ESP(tsk)		(tsk->thread.ksp)
tsk                87 arch/arc/include/asm/processor.h #define TSK_K_REG(tsk, off)	(*((unsigned long *)(TSK_K_ESP(tsk) + \
tsk                90 arch/arc/include/asm/processor.h #define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
tsk                91 arch/arc/include/asm/processor.h #define TSK_K_FP(tsk)		TSK_K_REG(tsk, 0)
tsk                30 arch/arc/include/asm/stacktrace.h 	struct task_struct *tsk, struct pt_regs *regs,
tsk                53 arch/arc/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                55 arch/arc/include/asm/thread_info.h 	.task       = &tsk,			\
tsk                64 arch/arc/include/asm/unwind.h #define STACK_BOTTOM_UNW(tsk)	STACK_LIMIT((tsk)->thread.ksp)
tsk                65 arch/arc/include/asm/unwind.h #define STACK_TOP_UNW(tsk)	((tsk)->thread.ksp)
tsk                13 arch/arc/kernel/ptrace.c static struct callee_regs *task_callee_regs(struct task_struct *tsk)
tsk                15 arch/arc/kernel/ptrace.c 	struct callee_regs *tmp = (struct callee_regs *)tsk->thread.callee_reg;
tsk                41 arch/arc/kernel/stacktrace.c static void seed_unwind_frame_info(struct task_struct *tsk,
tsk                49 arch/arc/kernel/stacktrace.c 	if (tsk == NULL && regs == NULL) {
tsk                73 arch/arc/kernel/stacktrace.c 		frame_info->task = tsk;
tsk                75 arch/arc/kernel/stacktrace.c 		frame_info->regs.r27 = TSK_K_FP(tsk);
tsk                76 arch/arc/kernel/stacktrace.c 		frame_info->regs.r28 = TSK_K_ESP(tsk);
tsk                77 arch/arc/kernel/stacktrace.c 		frame_info->regs.r31 = TSK_K_BLINK(tsk);
tsk                98 arch/arc/kernel/stacktrace.c 		frame_info->task = tsk;
tsk               111 arch/arc/kernel/stacktrace.c arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
tsk               119 arch/arc/kernel/stacktrace.c 	seed_unwind_frame_info(tsk, regs, &frame_info);
tsk               220 arch/arc/kernel/stacktrace.c noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
tsk               223 arch/arc/kernel/stacktrace.c 	arc_unwind_core(tsk, regs, __print_sym, NULL);
tsk               228 arch/arc/kernel/stacktrace.c void show_stack(struct task_struct *tsk, unsigned long *sp)
tsk               230 arch/arc/kernel/stacktrace.c 	show_stacktrace(tsk, NULL);
tsk               237 arch/arc/kernel/stacktrace.c unsigned int get_wchan(struct task_struct *tsk)
tsk               239 arch/arc/kernel/stacktrace.c 	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
tsk               248 arch/arc/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk               251 arch/arc/kernel/stacktrace.c 	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
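The seed_unwind_frame_info() hits above show the usual three-way seeding of an unwind: explicit pt_regs from an exception, the live registers when unwinding ourselves, or the switch-time state (TSK_K_FP/TSK_K_ESP/TSK_K_BLINK, values parked on the kernel stack by __switch_to) for a sleeping task. A condensed sketch of that branch structure, not a verbatim copy of the function:

	/* Condensed sketch of the seeding logic visible in the hits above. */
	static void seed_frame(struct task_struct *tsk, struct pt_regs *regs,
			       struct unwind_frame_info *fi)
	{
		fi->task = tsk;
		if (regs) {				/* exception context */
			fi->regs.r27 = regs->fp;
			fi->regs.r28 = regs->sp;
			fi->regs.r31 = regs->blink;
		} else if (tsk && tsk != current) {	/* sleeping task */
			fi->regs.r27 = TSK_K_FP(tsk);
			fi->regs.r28 = TSK_K_ESP(tsk);
			fi->regs.r31 = TSK_K_BLINK(tsk);
		}
		/* tsk == NULL && regs == NULL: seed from our own live regs */
	}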
tsk                46 arch/arc/kernel/traps.c 		struct task_struct *tsk = current;
tsk                48 arch/arc/kernel/traps.c 		tsk->thread.fault_address = (__force unsigned int)addr;
tsk                61 arch/arc/kernel/troubleshoot.c static void print_task_path_n_nm(struct task_struct *tsk)
tsk                68 arch/arc/kernel/troubleshoot.c 	mm = get_task_mm(tsk);
tsk               180 arch/arc/kernel/troubleshoot.c 	struct task_struct *tsk = current;
tsk               189 arch/arc/kernel/troubleshoot.c 	print_task_path_n_nm(tsk);
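print_task_path_n_nm() above pins the task's address space with get_task_mm() before touching it. A sketch of that reference-counting pattern (the d_path() detail is elided; get_task_mm(), get_mm_exe_file(), fput() and mmput() are the real APIs):

	struct mm_struct *mm = get_task_mm(tsk);
	if (mm) {
		struct file *exe_file = get_mm_exe_file(mm); /* may be NULL */
		if (exe_file) {
			/* ... d_path(&exe_file->f_path, buf, len) ... */
			fput(exe_file);
		}
		mmput(mm);	/* drop the reference taken above */
	}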
tsk                64 arch/arc/mm/fault.c 	struct task_struct *tsk = current;
tsk                65 arch/arc/mm/fault.c 	struct mm_struct *mm = tsk->mm;
tsk               166 arch/arc/mm/fault.c 			tsk->maj_flt++;
tsk               170 arch/arc/mm/fault.c 			tsk->min_flt++;
tsk               195 arch/arc/mm/fault.c 	tsk->thread.fault_address = address;
tsk               130 arch/arm/include/asm/hw_breakpoint.h extern void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
tsk               138 arch/arm/include/asm/hw_breakpoint.h static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk) {}
tsk                28 arch/arm/include/asm/mmu_context.h void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
tsk                30 arch/arm/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk                51 arch/arm/include/asm/mmu_context.h 					    struct task_struct *tsk)
tsk                96 arch/arm/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk               117 arch/arm/include/asm/mmu_context.h enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk               129 arch/arm/include/asm/mmu_context.h 	  struct task_struct *tsk)
tsk               145 arch/arm/include/asm/mmu_context.h 		check_and_switch_context(next, tsk);
tsk               152 arch/arm/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk               101 arch/arm/include/asm/processor.h #define KSTK_EIP(tsk)	task_pt_regs(tsk)->ARM_pc
tsk               102 arch/arm/include/asm/processor.h #define KSTK_ESP(tsk)	task_pt_regs(tsk)->ARM_sp
tsk                70 arch/arm/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)						\
tsk                72 arch/arm/include/asm/thread_info.h 	.task		= &tsk,						\
tsk                94 arch/arm/include/asm/thread_info.h #define thread_saved_pc(tsk)	\
tsk                95 arch/arm/include/asm/thread_info.h 	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
tsk                96 arch/arm/include/asm/thread_info.h #define thread_saved_sp(tsk)	\
tsk                97 arch/arm/include/asm/thread_info.h 	((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
tsk               100 arch/arm/include/asm/thread_info.h #define thread_saved_fp(tsk)	\
tsk               101 arch/arm/include/asm/thread_info.h 	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
tsk               103 arch/arm/include/asm/thread_info.h #define thread_saved_fp(tsk)	\
tsk               104 arch/arm/include/asm/thread_info.h 	((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
tsk                86 arch/arm/include/asm/uaccess-asm.h 	.macro	uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
tsk               108 arch/arm/include/asm/uaccess-asm.h 	.macro	uaccess_exit, tsk, tmp0, tmp1
tsk                39 arch/arm/include/asm/unwind.h extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
tsk               199 arch/arm/kernel/process.c void exit_thread(struct task_struct *tsk)
tsk               201 arch/arm/kernel/process.c 	thread_notify(THREAD_NOTIFY_EXIT, task_thread_info(tsk));
tsk               207 arch/arm/kernel/process.c 	struct task_struct *tsk = current;
tsk               209 arch/arm/kernel/process.c 	flush_ptrace_hw_breakpoint(tsk);
tsk               212 arch/arm/kernel/process.c 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
tsk               251 arch/arm/kernel/ptrace.c static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
tsk               261 arch/arm/kernel/ptrace.c 		tmp = tsk->mm->start_code;
tsk               263 arch/arm/kernel/ptrace.c 		tmp = tsk->mm->start_data;
tsk               265 arch/arm/kernel/ptrace.c 		tmp = tsk->mm->end_code;
tsk               267 arch/arm/kernel/ptrace.c 		tmp = get_user_reg(tsk, off >> 2);
tsk               278 arch/arm/kernel/ptrace.c static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
tsk               287 arch/arm/kernel/ptrace.c 	return put_user_reg(tsk, off >> 2, val);
tsk               295 arch/arm/kernel/ptrace.c static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
tsk               297 arch/arm/kernel/ptrace.c 	struct thread_info *thread = task_thread_info(tsk);
tsk               309 arch/arm/kernel/ptrace.c static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
tsk               311 arch/arm/kernel/ptrace.c 	struct thread_info *thread = task_thread_info(tsk);
tsk               326 arch/arm/kernel/ptrace.c static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
tsk               328 arch/arm/kernel/ptrace.c 	struct thread_info *thread = task_thread_info(tsk);
tsk               338 arch/arm/kernel/ptrace.c static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
tsk               340 arch/arm/kernel/ptrace.c 	struct thread_info *thread = task_thread_info(tsk);
tsk               399 arch/arm/kernel/ptrace.c void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
tsk               401 arch/arm/kernel/ptrace.c 	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
tsk               408 arch/arm/kernel/ptrace.c void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
tsk               411 arch/arm/kernel/ptrace.c 	struct thread_struct *t = &tsk->thread;
tsk               442 arch/arm/kernel/ptrace.c static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
tsk               455 arch/arm/kernel/ptrace.c 					   tsk);
tsk               458 arch/arm/kernel/ptrace.c static int ptrace_gethbpregs(struct task_struct *tsk, long num,
tsk               475 arch/arm/kernel/ptrace.c 		bp = tsk->thread.debug.hbp[idx];
tsk               504 arch/arm/kernel/ptrace.c static int ptrace_sethbpregs(struct task_struct *tsk, long num,
tsk               531 arch/arm/kernel/ptrace.c 	bp = tsk->thread.debug.hbp[idx];
tsk               533 arch/arm/kernel/ptrace.c 		bp = ptrace_hbp_create(tsk, implied_type);
tsk               538 arch/arm/kernel/ptrace.c 		tsk->thread.debug.hbp[idx] = bp;
tsk               102 arch/arm/kernel/stacktrace.c static noinline void __save_stack_trace(struct task_struct *tsk,
tsk               112 arch/arm/kernel/stacktrace.c 	if (tsk != current) {
tsk               121 arch/arm/kernel/stacktrace.c 		frame.fp = thread_saved_fp(tsk);
tsk               122 arch/arm/kernel/stacktrace.c 		frame.sp = thread_saved_sp(tsk);
tsk               124 arch/arm/kernel/stacktrace.c 		frame.pc = thread_saved_pc(tsk);
tsk               155 arch/arm/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk               157 arch/arm/kernel/stacktrace.c 	__save_stack_trace(tsk, trace, 1);
tsk               202 arch/arm/kernel/traps.c static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
tsk               204 arch/arm/kernel/traps.c 	unwind_backtrace(regs, tsk);
tsk               207 arch/arm/kernel/traps.c static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
tsk               214 arch/arm/kernel/traps.c 	if (!tsk)
tsk               215 arch/arm/kernel/traps.c 		tsk = current;
tsk               220 arch/arm/kernel/traps.c 	} else if (tsk != current) {
tsk               221 arch/arm/kernel/traps.c 		fp = thread_saved_fp(tsk);
tsk               234 arch/arm/kernel/traps.c 	} else if (fp < (unsigned long)end_of_stack(tsk))
tsk               243 arch/arm/kernel/traps.c void show_stack(struct task_struct *tsk, unsigned long *sp)
tsk               245 arch/arm/kernel/traps.c 	dump_backtrace(NULL, tsk);
tsk               267 arch/arm/kernel/traps.c 	struct task_struct *tsk = current;
tsk               275 arch/arm/kernel/traps.c 	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
tsk               282 arch/arm/kernel/traps.c 		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
tsk               286 arch/arm/kernel/traps.c 			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
tsk               287 arch/arm/kernel/traps.c 		dump_backtrace(regs, tsk);
tsk               458 arch/arm/kernel/unwind.c void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
tsk               462 arch/arm/kernel/unwind.c 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
tsk               464 arch/arm/kernel/unwind.c 	if (!tsk)
tsk               465 arch/arm/kernel/unwind.c 		tsk = current;
tsk               472 arch/arm/kernel/unwind.c 	} else if (tsk == current) {
tsk               479 arch/arm/kernel/unwind.c 		frame.fp = thread_saved_fp(tsk);
tsk               480 arch/arm/kernel/unwind.c 		frame.sp = thread_saved_sp(tsk);
tsk               486 arch/arm/kernel/unwind.c 		frame.pc = thread_saved_pc(tsk);
tsk               237 arch/arm/mm/context.c void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
tsk               138 arch/arm/mm/fault.c 	struct task_struct *tsk = current;
tsk               148 arch/arm/mm/fault.c 		       tsk->comm, sig, addr, fsr);
tsk               149 arch/arm/mm/fault.c 		show_pte(KERN_ERR, tsk->mm, addr);
tsk               157 arch/arm/mm/fault.c 				   tsk->comm, addr);
tsk               160 arch/arm/mm/fault.c 	tsk->thread.address = addr;
tsk               161 arch/arm/mm/fault.c 	tsk->thread.error_code = fsr;
tsk               162 arch/arm/mm/fault.c 	tsk->thread.trap_no = 14;
tsk               168 arch/arm/mm/fault.c 	struct task_struct *tsk = current;
tsk               169 arch/arm/mm/fault.c 	struct mm_struct *mm = tsk->active_mm;
tsk               204 arch/arm/mm/fault.c 		unsigned int flags, struct task_struct *tsk)
tsk               240 arch/arm/mm/fault.c 	struct task_struct *tsk;
tsk               249 arch/arm/mm/fault.c 	tsk = current;
tsk               250 arch/arm/mm/fault.c 	mm  = tsk->mm;
tsk               292 arch/arm/mm/fault.c 	fault = __do_page_fault(mm, addr, fsr, flags, tsk);
tsk               313 arch/arm/mm/fault.c 			tsk->maj_flt++;
tsk               317 arch/arm/mm/fault.c 			tsk->min_flt++;
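The arm fault-handler hits above follow the skeleton that recurs for every architecture in this listing (arc, csky, mips, nds32, nios2, openrisc): the faulting task is always current, and its maj_flt/min_flt counters are bumped according to what handle_mm_fault() reports. A condensed sketch with the locking, retry and error paths elided (vma, addr and flags are assumed to be set up by the elided code):

	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	vm_fault_t fault;

	/* ... down_read(&mm->mmap_sem), find_vma(), access checks ... */
	fault = handle_mm_fault(vma, addr, flags);
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;	/* required I/O, e.g. a read from disk */
	else
		tsk->min_flt++;	/* satisfied without I/O */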
tsk               178 arch/arm64/include/asm/mmu_context.h #define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
tsk               181 arch/arm64/include/asm/mmu_context.h static inline void update_saved_ttbr0(struct task_struct *tsk,
tsk               194 arch/arm64/include/asm/mmu_context.h 	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
tsk               197 arch/arm64/include/asm/mmu_context.h static inline void update_saved_ttbr0(struct task_struct *tsk,
tsk               204 arch/arm64/include/asm/mmu_context.h enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk               210 arch/arm64/include/asm/mmu_context.h 	update_saved_ttbr0(tsk, &init_mm);
tsk               231 arch/arm64/include/asm/mmu_context.h 	  struct task_struct *tsk)
tsk               242 arch/arm64/include/asm/mmu_context.h 	update_saved_ttbr0(tsk, next);
tsk               245 arch/arm64/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk                66 arch/arm64/include/asm/pointer_auth.h extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
tsk                80 arch/arm64/include/asm/pointer_auth.h #define ptrauth_thread_init_user(tsk)					\
tsk                82 arch/arm64/include/asm/pointer_auth.h 	struct task_struct *__ptiu_tsk = (tsk);				\
tsk                87 arch/arm64/include/asm/pointer_auth.h #define ptrauth_thread_switch(tsk)	\
tsk                88 arch/arm64/include/asm/pointer_auth.h 	ptrauth_keys_switch(&(tsk)->thread.keys_user)
tsk                91 arch/arm64/include/asm/pointer_auth.h #define ptrauth_prctl_reset_keys(tsk, arg)	(-EINVAL)
tsk                93 arch/arm64/include/asm/pointer_auth.h #define ptrauth_thread_init_user(tsk)
tsk                94 arch/arm64/include/asm/pointer_auth.h #define ptrauth_thread_switch(tsk)
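The pointer_auth.h hits appear twice because the header provides a real and a stub flavour of each macro, keeping callers ifdef-free. The selection shape, assuming the CONFIG_ARM64_PTR_AUTH guard used by that header:

	#ifdef CONFIG_ARM64_PTR_AUTH
	#define ptrauth_thread_switch(tsk) \
		ptrauth_keys_switch(&(tsk)->thread.keys_user)
	#else
	#define ptrauth_thread_switch(tsk)		/* no-op */
	#define ptrauth_prctl_reset_keys(tsk, arg)	(-EINVAL)
	#endif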
tsk                59 arch/arm64/include/asm/processor.h #define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
tsk               257 arch/arm64/include/asm/processor.h #define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
tsk               258 arch/arm64/include/asm/processor.h #define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))
tsk               302 arch/arm64/include/asm/processor.h #define PAC_RESET_KEYS(tsk, arg)	ptrauth_prctl_reset_keys(tsk, arg)
tsk                64 arch/arm64/include/asm/stacktrace.h extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
tsk                65 arch/arm64/include/asm/stacktrace.h extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
tsk                67 arch/arm64/include/asm/stacktrace.h extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
tsk                92 arch/arm64/include/asm/stacktrace.h static inline bool on_task_stack(const struct task_struct *tsk,
tsk                96 arch/arm64/include/asm/stacktrace.h 	unsigned long low = (unsigned long)task_stack_page(tsk);
tsk               141 arch/arm64/include/asm/stacktrace.h static inline bool on_accessible_stack(const struct task_struct *tsk,
tsk               148 arch/arm64/include/asm/stacktrace.h 	if (on_task_stack(tsk, sp, info))
tsk               150 arch/arm64/include/asm/stacktrace.h 	if (tsk != current || preemptible())
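on_task_stack() above is a plain bounds check against the task's stack page; on_accessible_stack() additionally accepts this CPU's IRQ/overflow stacks, but only when unwinding current (a remote task cannot be running on our per-CPU stacks, hence the tsk != current || preemptible() bail-out). A sketch of the bounds test, assuming THREAD_SIZE-sized stacks:

	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	if (sp < low || sp >= high)
		return false;	/* not on this task's kernel stack */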
tsk                46 arch/arm64/include/asm/thread_info.h #define thread_saved_pc(tsk)	\
tsk                47 arch/arm64/include/asm/thread_info.h 	((unsigned long)(tsk->thread.cpu_context.pc))
tsk                48 arch/arm64/include/asm/thread_info.h #define thread_saved_sp(tsk)	\
tsk                49 arch/arm64/include/asm/thread_info.h 	((unsigned long)(tsk->thread.cpu_context.sp))
tsk                50 arch/arm64/include/asm/thread_info.h #define thread_saved_fp(tsk)	\
tsk                51 arch/arm64/include/asm/thread_info.h 	((unsigned long)(tsk->thread.cpu_context.fp))
tsk                56 arch/arm64/include/asm/thread_info.h void arch_release_task_struct(struct task_struct *tsk);
tsk               105 arch/arm64/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)						\
tsk               162 arch/arm64/kernel/hw_breakpoint.c 	struct task_struct *tsk = bp->hw.target;
tsk               171 arch/arm64/kernel/hw_breakpoint.c 	return tsk && is_compat_thread(task_thread_info(tsk));
tsk                10 arch/arm64/kernel/pointer_auth.c int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
tsk                12 arch/arm64/kernel/pointer_auth.c 	struct ptrauth_keys *keys = &tsk->thread.keys_user;
tsk               332 arch/arm64/kernel/process.c void arch_release_task_struct(struct task_struct *tsk)
tsk               334 arch/arm64/kernel/process.c 	fpsimd_release_task(tsk);
tsk               209 arch/arm64/kernel/ptrace.c void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
tsk               212 arch/arm64/kernel/ptrace.c 	struct thread_struct *t = &tsk->thread;
tsk               229 arch/arm64/kernel/ptrace.c void ptrace_hw_copy_thread(struct task_struct *tsk)
tsk               231 arch/arm64/kernel/ptrace.c 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
tsk               235 arch/arm64/kernel/ptrace.c 					       struct task_struct *tsk,
tsk               245 arch/arm64/kernel/ptrace.c 		bp = tsk->thread.debug.hbp_break[idx];
tsk               251 arch/arm64/kernel/ptrace.c 		bp = tsk->thread.debug.hbp_watch[idx];
tsk               260 arch/arm64/kernel/ptrace.c 				struct task_struct *tsk,
tsk               271 arch/arm64/kernel/ptrace.c 		tsk->thread.debug.hbp_break[idx] = bp;
tsk               278 arch/arm64/kernel/ptrace.c 		tsk->thread.debug.hbp_watch[idx] = bp;
tsk               288 arch/arm64/kernel/ptrace.c 					    struct task_struct *tsk,
tsk               317 arch/arm64/kernel/ptrace.c 	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
tsk               321 arch/arm64/kernel/ptrace.c 	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
tsk               387 arch/arm64/kernel/ptrace.c 			       struct task_struct *tsk,
tsk               391 arch/arm64/kernel/ptrace.c 	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
tsk               401 arch/arm64/kernel/ptrace.c 			       struct task_struct *tsk,
tsk               405 arch/arm64/kernel/ptrace.c 	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
tsk               415 arch/arm64/kernel/ptrace.c 							struct task_struct *tsk,
tsk               418 arch/arm64/kernel/ptrace.c 	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
tsk               421 arch/arm64/kernel/ptrace.c 		bp = ptrace_hbp_create(note_type, tsk, idx);
tsk               427 arch/arm64/kernel/ptrace.c 			       struct task_struct *tsk,
tsk               436 arch/arm64/kernel/ptrace.c 	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
tsk               452 arch/arm64/kernel/ptrace.c 			       struct task_struct *tsk,
tsk               460 arch/arm64/kernel/ptrace.c 	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
tsk              1530 arch/arm64/kernel/ptrace.c static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
tsk              1539 arch/arm64/kernel/ptrace.c 		tmp = tsk->mm->start_code;
tsk              1541 arch/arm64/kernel/ptrace.c 		tmp = tsk->mm->start_data;
tsk              1543 arch/arm64/kernel/ptrace.c 		tmp = tsk->mm->end_code;
tsk              1545 arch/arm64/kernel/ptrace.c 		return copy_regset_to_user(tsk, &user_aarch32_view,
tsk              1556 arch/arm64/kernel/ptrace.c static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
tsk              1569 arch/arm64/kernel/ptrace.c 	ret = copy_regset_from_user(tsk, &user_aarch32_view,
tsk              1615 arch/arm64/kernel/ptrace.c 				 struct task_struct *tsk,
tsk              1625 arch/arm64/kernel/ptrace.c 		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
tsk              1628 arch/arm64/kernel/ptrace.c 		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
tsk              1636 arch/arm64/kernel/ptrace.c 				 struct task_struct *tsk,
tsk              1647 arch/arm64/kernel/ptrace.c 		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
tsk              1650 arch/arm64/kernel/ptrace.c 		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
tsk              1656 arch/arm64/kernel/ptrace.c static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
tsk              1664 arch/arm64/kernel/ptrace.c 		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
tsk              1670 arch/arm64/kernel/ptrace.c 		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
tsk              1679 arch/arm64/kernel/ptrace.c static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
tsk              1693 arch/arm64/kernel/ptrace.c 		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
tsk              1695 arch/arm64/kernel/ptrace.c 		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
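The arm64 hw-breakpoint plumbing above funnels through a small get-or-create idiom, visible verbatim at the ptrace_hbp_get_initialised_bp() hits: look up the perf event backing slot idx for tsk, lazily registering one on first use.

	bp = ptrace_hbp_get_event(note_type, tsk, idx);
	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);
	return bp;	/* may be an ERR_PTR from the create path */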
tsk               785 arch/arm64/kernel/signal.c 	struct task_struct *tsk = current;
tsk               814 arch/arm64/kernel/signal.c 		user_fastforward_single_step(tsk);
tsk                41 arch/arm64/kernel/stacktrace.c int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
tsk                49 arch/arm64/kernel/stacktrace.c 	if (!tsk)
tsk                50 arch/arm64/kernel/stacktrace.c 		tsk = current;
tsk                52 arch/arm64/kernel/stacktrace.c 	if (!on_accessible_stack(tsk, fp, &info))
tsk                88 arch/arm64/kernel/stacktrace.c 	if (tsk->ret_stack &&
tsk                97 arch/arm64/kernel/stacktrace.c 		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
tsk               117 arch/arm64/kernel/stacktrace.c void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
tsk               125 arch/arm64/kernel/stacktrace.c 		ret = unwind_frame(tsk, frame);
tsk               171 arch/arm64/kernel/stacktrace.c static noinline void __save_stack_trace(struct task_struct *tsk,
tsk               177 arch/arm64/kernel/stacktrace.c 	if (!try_get_task_stack(tsk))
tsk               184 arch/arm64/kernel/stacktrace.c 	if (tsk != current) {
tsk               185 arch/arm64/kernel/stacktrace.c 		start_backtrace(&frame, thread_saved_fp(tsk),
tsk               186 arch/arm64/kernel/stacktrace.c 				thread_saved_pc(tsk));
tsk               195 arch/arm64/kernel/stacktrace.c 	walk_stackframe(tsk, &frame, save_trace, &data);
tsk               197 arch/arm64/kernel/stacktrace.c 	put_task_stack(tsk);
tsk               201 arch/arm64/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk               203 arch/arm64/kernel/stacktrace.c 	__save_stack_trace(tsk, trace, 1);
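__save_stack_trace() above brackets the walk with try_get_task_stack() and put_task_stack(): with CONFIG_THREAD_INFO_IN_TASK the kernel stack can be freed before the task_struct itself, so it must be pinned for the duration of the unwind. The guard shape:

	if (!try_get_task_stack(tsk))
		return;		/* task is exiting; its stack is already gone */

	/* ... start_backtrace() + walk_stackframe() ... */

	put_task_stack(tsk);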
tsk                85 arch/arm64/kernel/traps.c void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
tsk                90 arch/arm64/kernel/traps.c 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
tsk                98 arch/arm64/kernel/traps.c 	if (!tsk)
tsk                99 arch/arm64/kernel/traps.c 		tsk = current;
tsk               101 arch/arm64/kernel/traps.c 	if (!try_get_task_stack(tsk))
tsk               104 arch/arm64/kernel/traps.c 	if (tsk == current) {
tsk               113 arch/arm64/kernel/traps.c 				thread_saved_fp(tsk),
tsk               114 arch/arm64/kernel/traps.c 				thread_saved_pc(tsk));
tsk               133 arch/arm64/kernel/traps.c 	} while (!unwind_frame(tsk, &frame));
tsk               135 arch/arm64/kernel/traps.c 	put_task_stack(tsk);
tsk               138 arch/arm64/kernel/traps.c void show_stack(struct task_struct *tsk, unsigned long *sp)
tsk               140 arch/arm64/kernel/traps.c 	dump_backtrace(NULL, tsk);
tsk               212 arch/arm64/kernel/traps.c 	struct task_struct *tsk = current;
tsk               213 arch/arm64/kernel/traps.c 	unsigned int esr = tsk->thread.fault_code;
tsk               214 arch/arm64/kernel/traps.c 	struct pt_regs *regs = task_pt_regs(tsk);
tsk               218 arch/arm64/kernel/traps.c 	    !unhandled_signal(tsk, signo) ||
tsk               222 arch/arm64/kernel/traps.c 	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
tsk                85 arch/c6x/include/asm/processor.h #define thread_saved_ksp(tsk) \
tsk                86 arch/c6x/include/asm/processor.h 	(*(unsigned long *)&(tsk)->thread.b15_14)
tsk                87 arch/c6x/include/asm/processor.h #define thread_saved_dp(tsk) \
tsk                88 arch/c6x/include/asm/processor.h 	(*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
tsk                90 arch/c6x/include/asm/processor.h #define thread_saved_ksp(tsk) \
tsk                91 arch/c6x/include/asm/processor.h 	(*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
tsk                92 arch/c6x/include/asm/processor.h #define thread_saved_dp(tsk) \
tsk                93 arch/c6x/include/asm/processor.h 	(*(unsigned long *)&(tsk)->thread.b15_14)
tsk                19 arch/c6x/include/asm/switch_to.h 			     struct task_struct *tsk);
tsk                51 arch/c6x/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                53 arch/c6x/include/asm/thread_info.h 	.task		= &tsk,			\
tsk                75 arch/csky/include/asm/elf.h extern int dump_task_regs(struct task_struct *tsk, elf_gregset_t *elf_regs);
tsk                76 arch/csky/include/asm/elf.h #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
tsk                26 arch/csky/include/asm/mmu_context.h #define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.asid, 0); 0; })
tsk                30 arch/csky/include/asm/mmu_context.h #define enter_lazy_tlb(mm, tsk)		do {} while (0)
tsk                31 arch/csky/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)		do {} while (0)
tsk                37 arch/csky/include/asm/mmu_context.h 	  struct task_struct *tsk)
tsk                83 arch/csky/include/asm/processor.h #define prepare_to_copy(tsk)    do { } while (0)
tsk                87 arch/csky/include/asm/processor.h #define copy_segments(tsk, mm)		do { } while (0)
tsk                91 arch/csky/include/asm/processor.h extern unsigned long thread_saved_pc(struct task_struct *tsk);
tsk                95 arch/csky/include/asm/processor.h #define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
tsk                96 arch/csky/include/asm/processor.h #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->usp)
tsk                27 arch/csky/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                29 arch/csky/include/asm/thread_info.h 	.task		= &tsk,			\
tsk                40 arch/csky/include/asm/thread_info.h #define thread_saved_fp(tsk) \
tsk                41 arch/csky/include/asm/thread_info.h 	((unsigned long)(((struct switch_stack *)(tsk->thread.ksp))->r8))
tsk                30 arch/csky/kernel/process.c unsigned long thread_saved_pc(struct task_struct *tsk)
tsk                32 arch/csky/kernel/process.c 	struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
tsk                85 arch/csky/kernel/process.c int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
tsk                87 arch/csky/kernel/process.c 	struct pt_regs *regs = task_pt_regs(tsk);
tsk                38 arch/csky/kernel/ptrace.c static void singlestep_disable(struct task_struct *tsk)
tsk                42 arch/csky/kernel/ptrace.c 	regs = task_pt_regs(tsk);
tsk                46 arch/csky/kernel/ptrace.c static void singlestep_enable(struct task_struct *tsk)
tsk                50 arch/csky/kernel/ptrace.c 	regs = task_pt_regs(tsk);
tsk                15 arch/csky/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk                23 arch/csky/kernel/stacktrace.c 	if (tsk == current) {
tsk                27 arch/csky/kernel/stacktrace.c 		fp = (unsigned long *)thread_saved_fp(tsk);
tsk                43 arch/csky/kernel/stacktrace.c 			lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
tsk               118 arch/csky/kernel/traps.c 	struct task_struct *tsk = current;
tsk               133 arch/csky/kernel/traps.c 		tsk->thread.trap_no = vector;
tsk               151 arch/csky/kernel/traps.c 		tsk->thread.trap_no = vector;
tsk               155 arch/csky/kernel/traps.c 		tsk->thread.trap_no = vector;
tsk               160 arch/csky/kernel/traps.c 		tsk->thread.trap_no = vector;
tsk               164 arch/csky/kernel/traps.c 		tsk->thread.trap_no = vector;
tsk               174 arch/csky/kernel/traps.c 	tsk->thread.trap_no = vector;
tsk                50 arch/csky/mm/fault.c 	struct task_struct *tsk = current;
tsk                51 arch/csky/mm/fault.c 	struct mm_struct *mm = tsk->mm;
tsk               160 arch/csky/mm/fault.c 		tsk->maj_flt++;
tsk               164 arch/csky/mm/fault.c 		tsk->min_flt++;
tsk               182 arch/csky/mm/fault.c 		tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
tsk               188 arch/csky/mm/fault.c 	tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
tsk               204 arch/csky/mm/fault.c 	tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
tsk               214 arch/csky/mm/fault.c 	tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
tsk               110 arch/h8300/include/asm/processor.h #define	KSTK_EIP(tsk)	\
tsk               113 arch/h8300/include/asm/processor.h 		if ((tsk)->thread.esp0 > PAGE_SIZE &&	\
tsk               114 arch/h8300/include/asm/processor.h 		    MAP_NR((tsk)->thread.esp0) < max_mapnr)	 \
tsk               115 arch/h8300/include/asm/processor.h 			eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
tsk               118 arch/h8300/include/asm/processor.h #define	KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
tsk                40 arch/h8300/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                42 arch/h8300/include/asm/thread_info.h 	.task =		&tsk,			\
tsk                28 arch/hexagon/include/asm/mmu_context.h 	struct task_struct *tsk)
tsk                35 arch/hexagon/include/asm/mmu_context.h static inline void deactivate_mm(struct task_struct *tsk,
tsk                45 arch/hexagon/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *tsk,
tsk                56 arch/hexagon/include/asm/mmu_context.h 				struct task_struct *tsk)
tsk                60 arch/hexagon/include/asm/processor.h #define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk)))
tsk                61 arch/hexagon/include/asm/processor.h #define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk)))
tsk                63 arch/hexagon/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)                   \
tsk                65 arch/hexagon/include/asm/thread_info.h 	.task           = &tsk,                 \
tsk                18 arch/ia64/include/asm/cputime.h extern void arch_vtime_task_switch(struct task_struct *tsk);
tsk                53 arch/ia64/include/asm/mmu_context.h enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
tsk               181 arch/ia64/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk               346 arch/ia64/include/asm/processor.h #define KSTK_EIP(tsk)					\
tsk               348 arch/ia64/include/asm/processor.h 	struct pt_regs *_regs = task_pt_regs(tsk);	\
tsk               353 arch/ia64/include/asm/processor.h #define KSTK_ESP(tsk)  ((tsk)->thread.ksp)
tsk                46 arch/ia64/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                48 arch/ia64/include/asm/thread_info.h 	.task		= &tsk,			\
tsk                58 arch/ia64/include/asm/thread_info.h #define alloc_thread_stack_node(tsk, node)	\
tsk                59 arch/ia64/include/asm/thread_info.h 		((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
tsk                60 arch/ia64/include/asm/thread_info.h #define task_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
tsk                63 arch/ia64/include/asm/thread_info.h #define alloc_thread_stack_node(tsk, node)	((unsigned long *) 0)
tsk                64 arch/ia64/include/asm/thread_info.h #define task_thread_info(tsk)	((struct thread_info *) 0)
tsk                66 arch/ia64/include/asm/thread_info.h #define free_thread_stack(tsk)	/* nothing */
tsk                67 arch/ia64/include/asm/thread_info.h #define task_stack_page(tsk)	((void *)(tsk))
tsk                91 arch/ia64/include/asm/thread_info.h #define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
tsk               578 arch/ia64/kernel/process.c exit_thread (struct task_struct *tsk)
tsk               581 arch/ia64/kernel/process.c 	ia64_drop_fpu(tsk);
tsk               584 arch/ia64/kernel/process.c 	if (tsk->thread.pfm_context)
tsk               585 arch/ia64/kernel/process.c 		pfm_exit_thread(tsk);
tsk               588 arch/ia64/kernel/process.c 	if (tsk->thread.flags & IA64_THREAD_DBG_VALID)
tsk               589 arch/ia64/kernel/process.c 		pfm_release_debug_registers(tsk);
tsk              2133 arch/ia64/kernel/ptrace.c const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
tsk                66 arch/ia64/kernel/time.c void vtime_flush(struct task_struct *tsk)
tsk                68 arch/ia64/kernel/time.c 	struct thread_info *ti = task_thread_info(tsk);
tsk                72 arch/ia64/kernel/time.c 		account_user_time(tsk, cycle_to_nsec(ti->utime));
tsk                75 arch/ia64/kernel/time.c 		account_guest_time(tsk, cycle_to_nsec(ti->gtime));
tsk                82 arch/ia64/kernel/time.c 		account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
tsk                87 arch/ia64/kernel/time.c 		account_system_index_time(tsk, delta, CPUTIME_IRQ);
tsk                92 arch/ia64/kernel/time.c 		account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
tsk               121 arch/ia64/kernel/time.c static __u64 vtime_delta(struct task_struct *tsk)
tsk               123 arch/ia64/kernel/time.c 	struct thread_info *ti = task_thread_info(tsk);
tsk               135 arch/ia64/kernel/time.c void vtime_account_system(struct task_struct *tsk)
tsk               137 arch/ia64/kernel/time.c 	struct thread_info *ti = task_thread_info(tsk);
tsk               138 arch/ia64/kernel/time.c 	__u64 stime = vtime_delta(tsk);
tsk               140 arch/ia64/kernel/time.c 	if ((tsk->flags & PF_VCPU) && !irq_count())
tsk               151 arch/ia64/kernel/time.c void vtime_account_idle(struct task_struct *tsk)
tsk               153 arch/ia64/kernel/time.c 	struct thread_info *ti = task_thread_info(tsk);
tsk               155 arch/ia64/kernel/time.c 	ti->idle_time += vtime_delta(tsk);
tsk                 8 arch/m68k/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk                56 arch/m68k/include/asm/mmu_context.h #define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)
tsk                76 arch/m68k/include/asm/mmu_context.h 	struct task_struct *tsk)
tsk                78 arch/m68k/include/asm/mmu_context.h 	get_mmu_context(tsk->mm);
tsk                79 arch/m68k/include/asm/mmu_context.h 	set_context(tsk->mm->context, next->pgd);
tsk                93 arch/m68k/include/asm/mmu_context.h #define deactivate_mm(tsk, mm) do { } while (0)
tsk               169 arch/m68k/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *tsk,
tsk               198 arch/m68k/include/asm/mmu_context.h 			     struct task_struct *tsk)
tsk               200 arch/m68k/include/asm/mmu_context.h 	activate_context(tsk->mm);
tsk               203 arch/m68k/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)	do { } while (0)
tsk               217 arch/m68k/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *tsk,
tsk               283 arch/m68k/include/asm/mmu_context.h static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
tsk               293 arch/m68k/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk               310 arch/m68k/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk               316 arch/m68k/include/asm/mmu_context.h static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
tsk               321 arch/m68k/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk               191 arch/m68k/include/asm/motorola_pgtable.h #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
tsk               130 arch/m68k/include/asm/processor.h #define	KSTK_EIP(tsk)	\
tsk               133 arch/m68k/include/asm/processor.h 	if ((tsk)->thread.esp0 > PAGE_SIZE && \
tsk               134 arch/m68k/include/asm/processor.h 	    (virt_addr_valid((tsk)->thread.esp0))) \
tsk               135 arch/m68k/include/asm/processor.h 	      eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
tsk               137 arch/m68k/include/asm/processor.h #define	KSTK_ESP(tsk)	((tsk) == current ? rdusp() : (tsk)->thread.usp)
tsk               139 arch/m68k/include/asm/processor.h #define task_pt_regs(tsk)	((struct pt_regs *) ((tsk)->thread.esp0))
tsk                37 arch/m68k/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                39 arch/m68k/include/asm/thread_info.h 	.task		= &tsk,			\
tsk                39 arch/microblaze/include/asm/mmu_context_mm.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk               106 arch/microblaze/include/asm/mmu_context_mm.h # define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)
tsk               121 arch/microblaze/include/asm/mmu_context_mm.h 			     struct task_struct *tsk)
tsk               123 arch/microblaze/include/asm/mmu_context_mm.h 	tsk->thread.pgdir = next->pgd;
tsk                26 arch/microblaze/include/asm/processor.h #define task_pt_regs(tsk) \
tsk                27 arch/microblaze/include/asm/processor.h 		(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
tsk                68 arch/microblaze/include/asm/processor.h # define KSTK_EIP(tsk)	(0)
tsk                69 arch/microblaze/include/asm/processor.h # define KSTK_ESP(tsk)	(0)
tsk               119 arch/microblaze/include/asm/processor.h #  define task_pt_regs_plus_args(tsk) \
tsk               120 arch/microblaze/include/asm/processor.h 	((void *)task_pt_regs(tsk))
tsk               129 arch/microblaze/include/asm/processor.h #  define deactivate_mm(tsk, mm)	do { } while (0)
tsk                80 arch/microblaze/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                82 arch/microblaze/include/asm/thread_info.h 	.task		= &tsk,			\
tsk                27 arch/microblaze/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk                29 arch/microblaze/kernel/stacktrace.c 	microblaze_unwind(tsk, trace);
tsk                70 arch/mips/include/asm/dsemul.h extern bool dsemul_thread_cleanup(struct task_struct *tsk);
tsk                72 arch/mips/include/asm/dsemul.h static inline bool dsemul_thread_cleanup(struct task_struct *tsk)
tsk                39 arch/mips/include/asm/dsp.h #define __save_dsp(tsk)							\
tsk                41 arch/mips/include/asm/dsp.h 	tsk->thread.dsp.dspr[0] = mfhi1();				\
tsk                42 arch/mips/include/asm/dsp.h 	tsk->thread.dsp.dspr[1] = mflo1();				\
tsk                43 arch/mips/include/asm/dsp.h 	tsk->thread.dsp.dspr[2] = mfhi2();				\
tsk                44 arch/mips/include/asm/dsp.h 	tsk->thread.dsp.dspr[3] = mflo2();				\
tsk                45 arch/mips/include/asm/dsp.h 	tsk->thread.dsp.dspr[4] = mfhi3();				\
tsk                46 arch/mips/include/asm/dsp.h 	tsk->thread.dsp.dspr[5] = mflo3();				\
tsk                47 arch/mips/include/asm/dsp.h 	tsk->thread.dsp.dspcontrol = rddsp(DSP_MASK);			\
tsk                50 arch/mips/include/asm/dsp.h #define save_dsp(tsk)							\
tsk                53 arch/mips/include/asm/dsp.h 		__save_dsp(tsk);					\
tsk                56 arch/mips/include/asm/dsp.h #define __restore_dsp(tsk)						\
tsk                58 arch/mips/include/asm/dsp.h 	mthi1(tsk->thread.dsp.dspr[0]);					\
tsk                59 arch/mips/include/asm/dsp.h 	mtlo1(tsk->thread.dsp.dspr[1]);					\
tsk                60 arch/mips/include/asm/dsp.h 	mthi2(tsk->thread.dsp.dspr[2]);					\
tsk                61 arch/mips/include/asm/dsp.h 	mtlo2(tsk->thread.dsp.dspr[3]);					\
tsk                62 arch/mips/include/asm/dsp.h 	mthi3(tsk->thread.dsp.dspr[4]);					\
tsk                63 arch/mips/include/asm/dsp.h 	mtlo3(tsk->thread.dsp.dspr[5]);					\
tsk                64 arch/mips/include/asm/dsp.h 	wrdsp(tsk->thread.dsp.dspcontrol, DSP_MASK);			\
tsk                67 arch/mips/include/asm/dsp.h #define restore_dsp(tsk)						\
tsk                70 arch/mips/include/asm/dsp.h 		__restore_dsp(tsk);					\
tsk                73 arch/mips/include/asm/dsp.h #define __get_dsp_regs(tsk)						\
tsk                75 arch/mips/include/asm/dsp.h 	if (tsk == current)						\
tsk                78 arch/mips/include/asm/dsp.h 	tsk->thread.dsp.dspr;						\
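The save_dsp()/restore_dsp() wrappers above are what the MIPS context switch invokes; the cpu_has_dsp test lives inside the wrapper so call sites stay unconditional. A sketch of the intended call pattern (switch_dsp_state is a hypothetical name; the real calls sit in the resume/switch_to path):

	/* Hypothetical helper illustrating the call pattern only. */
	static inline void switch_dsp_state(struct task_struct *prev,
					    struct task_struct *next)
	{
		save_dsp(prev);		/* no-op unless cpu_has_dsp */
		restore_dsp(next);
	}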
tsk               164 arch/mips/include/asm/fpu.h static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
tsk               168 arch/mips/include/asm/fpu.h 			save_msa(tsk);
tsk               169 arch/mips/include/asm/fpu.h 			tsk->thread.fpu.fcr31 =
tsk               173 arch/mips/include/asm/fpu.h 		clear_tsk_thread_flag(tsk, TIF_USEDMSA);
tsk               177 arch/mips/include/asm/fpu.h 			_save_fp(tsk);
tsk               184 arch/mips/include/asm/fpu.h 	KSTK_STATUS(tsk) &= ~ST0_CU1;
tsk               185 arch/mips/include/asm/fpu.h 	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
tsk               227 arch/mips/include/asm/fpu.h static inline void save_fp(struct task_struct *tsk)
tsk               230 arch/mips/include/asm/fpu.h 		_save_fp(tsk);
tsk               233 arch/mips/include/asm/fpu.h static inline void restore_fp(struct task_struct *tsk)
tsk               236 arch/mips/include/asm/fpu.h 		_restore_fp(tsk);
tsk               239 arch/mips/include/asm/fpu.h static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
tsk               241 arch/mips/include/asm/fpu.h 	if (tsk == current) {
tsk               248 arch/mips/include/asm/fpu.h 	return tsk->thread.fpu.fpr;
tsk               289 arch/mips/include/asm/fpu.h static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
tsk               312 arch/mips/include/asm/fpu.h extern void save_fp(struct task_struct *tsk)
tsk               318 arch/mips/include/asm/fpu.h extern void restore_fp(struct task_struct *tsk)
tsk               324 arch/mips/include/asm/fpu.h extern union fpureg *get_fpu_regs(struct task_struct *tsk)
tsk               172 arch/mips/include/asm/fpu_emulator.h 		     struct task_struct *tsk);
tsk               127 arch/mips/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk               140 arch/mips/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk               159 arch/mips/include/asm/mmu_context.h 			     struct task_struct *tsk)
tsk               189 arch/mips/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)	do { } while (0)
tsk                67 arch/mips/include/asm/processor.h #define TASK_SIZE_OF(tsk)						\
tsk                68 arch/mips/include/asm/processor.h 	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
tsk               381 arch/mips/include/asm/processor.h #define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
tsk               383 arch/mips/include/asm/processor.h #define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
tsk               384 arch/mips/include/asm/processor.h #define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
tsk               385 arch/mips/include/asm/processor.h #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
tsk               386 arch/mips/include/asm/processor.h #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
tsk                43 arch/mips/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                45 arch/mips/include/asm/thread_info.h 	.task		= &tsk,			\
tsk                84 arch/mips/kernel/process.c void exit_thread(struct task_struct *tsk)
tsk                91 arch/mips/kernel/process.c 		dsemul_thread_cleanup(tsk);
tsk               493 arch/mips/kernel/process.c static unsigned long thread_saved_pc(struct task_struct *tsk)
tsk               495 arch/mips/kernel/process.c 	struct thread_struct *t = &tsk->thread;
tsk                38 arch/mips/kernel/stacktrace.c 	struct task_struct *tsk, struct pt_regs *regs, int savesched)
tsk                47 arch/mips/kernel/stacktrace.c 			(unsigned long)task_stack_page(tsk);
tsk                62 arch/mips/kernel/stacktrace.c 		pc = unwind_stack(tsk, &sp, pc, &ra);
tsk                78 arch/mips/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk                85 arch/mips/kernel/stacktrace.c 	if (tsk != current) {
tsk                86 arch/mips/kernel/stacktrace.c 		regs->regs[29] = tsk->thread.reg29;
tsk                88 arch/mips/kernel/stacktrace.c 		regs->cp0_epc = tsk->thread.reg31;
tsk                91 arch/mips/kernel/stacktrace.c 	save_context_stack(trace, tsk, regs, tsk == current);
tsk               721 arch/mips/kernel/traps.c 		     struct task_struct *tsk)
tsk               736 arch/mips/kernel/traps.c 	force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
tsk               146 arch/mips/kernel/uprobes.c bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
tsk               148 arch/mips/kernel/uprobes.c 	if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
tsk               152 arch/mips/math-emu/dsemul.c bool dsemul_thread_cleanup(struct task_struct *tsk)
tsk               157 arch/mips/math-emu/dsemul.c 	fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);
tsk               163 arch/mips/math-emu/dsemul.c 	task_lock(tsk);
tsk               166 arch/mips/math-emu/dsemul.c 	if (tsk->mm)
tsk               167 arch/mips/math-emu/dsemul.c 		free_emuframe(fr_idx, tsk->mm);
tsk               169 arch/mips/math-emu/dsemul.c 	task_unlock(tsk);
tsk                42 arch/mips/mm/fault.c 	struct task_struct *tsk = current;
tsk                43 arch/mips/mm/fault.c 	struct mm_struct *mm = tsk->mm;
tsk               174 arch/mips/mm/fault.c 			tsk->maj_flt++;
tsk               178 arch/mips/mm/fault.c 			tsk->min_flt++;
tsk               207 arch/mips/mm/fault.c 		tsk->thread.cp0_badvaddr = address;
tsk               208 arch/mips/mm/fault.c 		tsk->thread.error_code = write;
tsk               210 arch/mips/mm/fault.c 		    unhandled_signal(tsk, SIGSEGV) &&
tsk               213 arch/mips/mm/fault.c 				tsk->comm,
tsk               274 arch/mips/mm/fault.c 	       tsk->comm,
tsk               281 arch/mips/mm/fault.c 	tsk->thread.cp0_badvaddr = address;
tsk               109 arch/nds32/include/asm/fpu.h static inline void unlazy_fpu(struct task_struct *tsk)
tsk               112 arch/nds32/include/asm/fpu.h 	if (test_tsk_fpu(task_pt_regs(tsk)))
tsk               113 arch/nds32/include/asm/fpu.h 		save_fpu(tsk);
tsk                13 arch/nds32/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk                50 arch/nds32/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk                55 arch/nds32/include/asm/mmu_context.h 			     struct task_struct *tsk)
tsk                65 arch/nds32/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk                84 arch/nds32/include/asm/processor.h #define prepare_to_copy(tsk)	do { } while (0)
tsk                99 arch/nds32/include/asm/processor.h #define KSTK_EIP(tsk)	instruction_pointer(task_pt_regs(tsk))
tsk               100 arch/nds32/include/asm/processor.h #define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))
tsk                37 arch/nds32/include/asm/stacktrace.h get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph);
tsk                30 arch/nds32/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)						\
tsk                35 arch/nds32/include/asm/thread_info.h #define thread_saved_pc(tsk) ((unsigned long)(tsk->thread.cpu_context.pc))
tsk                36 arch/nds32/include/asm/thread_info.h #define thread_saved_fp(tsk) ((unsigned long)(tsk->thread.cpu_context.fp))
tsk                21 arch/nds32/kernel/fpu.c void save_fpu(struct task_struct *tsk)
tsk                46 arch/nds32/kernel/fpu.c 			      : "r" (&tsk->thread.fpu)
tsk                59 arch/nds32/kernel/fpu.c 			      : "r" (&tsk->thread.fpu)
tsk                68 arch/nds32/kernel/fpu.c 			      : "r" (&tsk->thread.fpu)
tsk                79 arch/nds32/kernel/fpu.c 			      : "r"(&tsk->thread.fpu)
tsk              1255 arch/nds32/kernel/perf_event_cpu.c 						(tsk, &graph, frame->lp, NULL);
tsk               129 arch/nds32/kernel/process.c void exit_thread(struct task_struct *tsk)
tsk               132 arch/nds32/kernel/process.c 	if (last_task_used_math == tsk)
tsk               223 arch/nds32/kernel/process.c 	struct task_struct *tsk = current;
tsk               225 arch/nds32/kernel/process.c 	fpvalid = tsk_used_math(tsk);
tsk               228 arch/nds32/kernel/process.c 		memcpy(fpu, &tsk->thread.fpu, sizeof(*fpu));
tsk                28 arch/nds32/kernel/signal.c 	struct task_struct *tsk = current;
tsk                50 arch/nds32/kernel/signal.c 	return __copy_from_user(&tsk->thread.fpu, &sc->fpu,
tsk                57 arch/nds32/kernel/signal.c 	struct task_struct *tsk = current;
tsk                67 arch/nds32/kernel/signal.c 	if (last_task_used_math == tsk)
tsk                70 arch/nds32/kernel/signal.c 	unlazy_fpu(tsk);
tsk                72 arch/nds32/kernel/signal.c 	ret = __copy_to_user(&sc->fpu, &tsk->thread.fpu,
tsk                15 arch/nds32/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk                22 arch/nds32/kernel/stacktrace.c 	if (tsk == current) {
tsk                26 arch/nds32/kernel/stacktrace.c 		fpn = (unsigned long *)thread_saved_fp(tsk);
tsk                39 arch/nds32/kernel/stacktrace.c 			lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
tsk               100 arch/nds32/kernel/traps.c static void __dump(struct task_struct *tsk, unsigned long *base_reg)
tsk               110 arch/nds32/kernel/traps.c 						tsk, &graph, ret_addr, NULL);
tsk               126 arch/nds32/kernel/traps.c 						tsk, &graph, ret_addr, NULL);
tsk               137 arch/nds32/kernel/traps.c void show_stack(struct task_struct *tsk, unsigned long *sp)
tsk               141 arch/nds32/kernel/traps.c 	if (!tsk)
tsk               142 arch/nds32/kernel/traps.c 		tsk = current;
tsk               144 arch/nds32/kernel/traps.c 		if (tsk != current)
tsk               145 arch/nds32/kernel/traps.c 			base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
tsk               149 arch/nds32/kernel/traps.c 		if (tsk != current)
tsk               150 arch/nds32/kernel/traps.c 			base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
tsk               154 arch/nds32/kernel/traps.c 	__dump(tsk, base_reg);
tsk               165 arch/nds32/kernel/traps.c 	struct task_struct *tsk = current;
tsk               177 arch/nds32/kernel/traps.c 		 tsk->comm, tsk->pid, end_of_stack(tsk));
tsk               260 arch/nds32/kernel/traps.c 	struct task_struct *tsk = current;
tsk               262 arch/nds32/kernel/traps.c 	tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
tsk               263 arch/nds32/kernel/traps.c 	tsk->thread.error_code = error_code;
tsk                73 arch/nds32/mm/fault.c 	struct task_struct *tsk;
tsk                82 arch/nds32/mm/fault.c 	tsk = current;
tsk                83 arch/nds32/mm/fault.c 	mm = tsk->mm;
tsk               236 arch/nds32/mm/fault.c 			tsk->maj_flt++;
tsk               240 arch/nds32/mm/fault.c 			tsk->min_flt++;
tsk               271 arch/nds32/mm/fault.c 		tsk->thread.address = addr;
tsk               272 arch/nds32/mm/fault.c 		tsk->thread.error_code = error_code;
tsk               273 arch/nds32/mm/fault.c 		tsk->thread.trap_no = entry;
tsk               340 arch/nds32/mm/fault.c 	tsk->thread.address = addr;
tsk               341 arch/nds32/mm/fault.c 	tsk->thread.error_code = error_code;
tsk               342 arch/nds32/mm/fault.c 	tsk->thread.trap_no = entry;
tsk                29 arch/nios2/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk                39 arch/nios2/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *tsk,
tsk                55 arch/nios2/include/asm/mmu_context.h 		struct task_struct *tsk);
tsk                57 arch/nios2/include/asm/mmu_context.h static inline void deactivate_mm(struct task_struct *tsk,
tsk                78 arch/nios2/include/asm/processor.h #define KSTK_EIP(tsk)	((tsk)->thread.kregs->ea)
tsk                79 arch/nios2/include/asm/processor.h #define KSTK_ESP(tsk)	((tsk)->thread.kregs->sp)
tsk                57 arch/nios2/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                59 arch/nios2/include/asm/thread_info.h 	.task		= &tsk,			\
tsk                46 arch/nios2/mm/fault.c 	struct task_struct *tsk = current;
tsk                47 arch/nios2/mm/fault.c 	struct mm_struct *mm = tsk->mm;
tsk                81 arch/nios2/mm/mmu_context.c 	       struct task_struct *tsk)
tsk                20 arch/openrisc/include/asm/mmu_context.h extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
tsk                23 arch/openrisc/include/asm/mmu_context.h 		      struct task_struct *tsk);
tsk                25 arch/openrisc/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)	do { } while (0)
tsk                35 arch/openrisc/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk               218 arch/openrisc/include/asm/pgtable.h #define SET_PAGE_DIR(tsk, pgdir)
tsk                70 arch/openrisc/include/asm/processor.h #define KSTK_EIP(tsk)   (task_pt_regs(tsk)->pc)
tsk                71 arch/openrisc/include/asm/processor.h #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
tsk                68 arch/openrisc/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)				\
tsk                70 arch/openrisc/include/asm/thread_info.h 	.task		= &tsk,				\
tsk                67 arch/openrisc/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk                71 arch/openrisc/kernel/stacktrace.c 	if (tsk == current)
tsk                74 arch/openrisc/kernel/stacktrace.c 		sp = (unsigned long *) KSTK_ESP(tsk);
tsk                58 arch/openrisc/kernel/traps.c void show_trace_task(struct task_struct *tsk)
tsk                48 arch/openrisc/mm/fault.c 	struct task_struct *tsk;
tsk                55 arch/openrisc/mm/fault.c 	tsk = current;
tsk                95 arch/openrisc/mm/fault.c 	mm = tsk->mm;
tsk               180 arch/openrisc/mm/fault.c 			tsk->maj_flt++;
tsk               182 arch/openrisc/mm/fault.c 			tsk->min_flt++;
tsk               163 arch/openrisc/mm/tlb.c int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk               309 arch/parisc/include/asm/elf.h #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
tsk                12 arch/parisc/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk                23 arch/parisc/include/asm/mmu_context.h init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk                54 arch/parisc/include/asm/mmu_context.h 		struct mm_struct *next, struct task_struct *tsk)
tsk                63 arch/parisc/include/asm/mmu_context.h 		struct mm_struct *next, struct task_struct *tsk)
tsk                71 arch/parisc/include/asm/mmu_context.h 	switch_mm_irqs_off(prev, next, tsk);
tsk                76 arch/parisc/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk                25 arch/parisc/include/asm/processor.h #define TASK_SIZE_OF(tsk)       ((tsk)->thread.task_size)
tsk               121 arch/parisc/include/asm/processor.h #define task_pt_regs(tsk) ((struct pt_regs *)&((tsk)->thread.regs))
tsk               287 arch/parisc/include/asm/processor.h #define KSTK_EIP(tsk)	((tsk)->thread.regs.iaoq[0])
tsk               288 arch/parisc/include/asm/processor.h #define KSTK_ESP(tsk)	((tsk)->thread.regs.gr[30])
tsk                14 arch/parisc/include/asm/syscall.h static inline long syscall_get_nr(struct task_struct *tsk,
tsk                20 arch/parisc/include/asm/syscall.h static inline void syscall_get_arguments(struct task_struct *tsk,
tsk                19 arch/parisc/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                21 arch/parisc/include/asm/thread_info.h 	.task		= &tsk,			\
tsk               168 arch/parisc/kernel/process.c int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
tsk               170 arch/parisc/kernel/process.c 	memcpy(r, tsk->thread.regs.fr, sizeof(*r));
tsk                42 arch/parisc/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk                44 arch/parisc/kernel/stacktrace.c 	dump_trace(tsk, trace);
tsk               235 arch/parisc/mm/fault.c 		unsigned long address, struct task_struct *tsk,
tsk               238 arch/parisc/mm/fault.c 	if (!unhandled_signal(tsk, SIGSEGV))
tsk               246 arch/parisc/mm/fault.c 	    tsk->comm, code, address);
tsk               263 arch/parisc/mm/fault.c 	struct task_struct *tsk;
tsk               272 arch/parisc/mm/fault.c 	tsk = current;
tsk               273 arch/parisc/mm/fault.c 	mm = tsk->mm;
tsk               395 arch/parisc/mm/fault.c 			tsk->comm, tsk->pid, address);
tsk               411 arch/parisc/mm/fault.c 		show_signal_msg(regs, code, address, tsk, vma);
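
show_signal_msg() above is gated two ways: it only reports for tasks with no SIGSEGV handler installed (unhandled_signal), and it is rate-limited. A userspace sketch of that gating; the 5-per-second limiter is illustrative, not the kernel's ___ratelimit().

    #include <stdio.h>
    #include <time.h>

    /* Crude per-second limiter standing in for the kernel ratelimit. */
    static int ratelimited(void)
    {
        static time_t window;
        static int count;
        time_t now = time(NULL);
        if (now != window) { window = now; count = 0; }
        return ++count > 5;
    }

    /* Print only for tasks with no SIGSEGV handler, and not too often. */
    static void show_signal_msg(const char *comm, int code,
                                unsigned long addr, int handles_sigsegv)
    {
        if (handles_sigsegv || ratelimited())
            return;
        fprintf(stderr, "%s: SIGSEGV code=%d addr=%#lx\n", comm, code, addr);
    }

    int main(void) { show_signal_msg("demo", 15, 0xdead, 0); return 0; }
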
tsk                45 arch/powerpc/include/asm/cputime.h #define get_accounting(tsk)	(&get_paca()->accounting)
tsk                46 arch/powerpc/include/asm/cputime.h static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
tsk                48 arch/powerpc/include/asm/cputime.h #define get_accounting(tsk)	(&task_thread_info(tsk)->accounting)
tsk                61 arch/powerpc/include/asm/hw_breakpoint.h extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
tsk                76 arch/powerpc/include/asm/hw_breakpoint.h extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
tsk                81 arch/powerpc/include/asm/hw_breakpoint.h static inline void thread_change_pc(struct task_struct *tsk,
tsk                 6 arch/powerpc/include/asm/membarrier.h 					     struct task_struct *tsk)
tsk                17 arch/powerpc/include/asm/mmu_context.h extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
tsk                57 arch/powerpc/include/asm/mmu_context.h extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
tsk                65 arch/powerpc/include/asm/mmu_context.h 				      struct task_struct *tsk)
tsk                69 arch/powerpc/include/asm/mmu_context.h 	return switch_slb(tsk, next);
tsk               105 arch/powerpc/include/asm/mmu_context.h 			       struct task_struct *tsk);
tsk               197 arch/powerpc/include/asm/mmu_context.h 			       struct task_struct *tsk);
tsk               200 arch/powerpc/include/asm/mmu_context.h 			     struct task_struct *tsk)
tsk               205 arch/powerpc/include/asm/mmu_context.h 	switch_mm_irqs_off(prev, next, tsk);
tsk               211 arch/powerpc/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk               224 arch/powerpc/include/asm/mmu_context.h 				  struct task_struct *tsk)
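
The powerpc mmu_context.h lines show a recurring shape: switch_mm() is a thin wrapper that masks interrupts around switch_mm_irqs_off(), which does the real context switch. A mocked, compilable sketch of that wrapper, with interrupt masking simulated by a flag rather than the real local_irq_save().

    #include <stdio.h>

    static int irqs_on = 1;
    static unsigned long local_irq_save(void)
    { unsigned long f = irqs_on; irqs_on = 0; return f; }
    static void local_irq_restore(unsigned long f) { irqs_on = (int)f; }

    struct mm_struct { int context_id; };

    static void switch_mm_irqs_off(struct mm_struct *prev,
                                   struct mm_struct *next)
    {
        (void)prev;      /* real code updates pgdir/SLB/context here */
        printf("switch to context %d (irqs_on=%d)\n",
               next->context_id, irqs_on);
    }

    static void switch_mm(struct mm_struct *prev, struct mm_struct *next)
    {
        unsigned long flags = local_irq_save();   /* hard-mask interrupts */
        switch_mm_irqs_off(prev, next);
        local_irq_restore(flags);
    }

    int main(void)
    {
        struct mm_struct a = {1}, b = {2};
        switch_mm(&a, &b);
        return 0;
    }
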
tsk               177 arch/powerpc/include/asm/pkeys.h extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
tsk               179 arch/powerpc/include/asm/pkeys.h static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
tsk               194 arch/powerpc/include/asm/pkeys.h 	return __arch_set_user_pkey_access(tsk, pkey, init_val);
tsk               301 arch/powerpc/include/asm/processor.h #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)
tsk               305 arch/powerpc/include/asm/processor.h #define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
tsk               306 arch/powerpc/include/asm/processor.h #define KSTK_ESP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
tsk               309 arch/powerpc/include/asm/processor.h #define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
tsk               310 arch/powerpc/include/asm/processor.h #define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
tsk               312 arch/powerpc/include/asm/processor.h extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
tsk               313 arch/powerpc/include/asm/processor.h extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
tsk               315 arch/powerpc/include/asm/processor.h #define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
tsk               316 arch/powerpc/include/asm/processor.h #define SET_ENDIAN(tsk, val) set_endian((tsk), (val))
tsk               318 arch/powerpc/include/asm/processor.h extern int get_endian(struct task_struct *tsk, unsigned long adr);
tsk               319 arch/powerpc/include/asm/processor.h extern int set_endian(struct task_struct *tsk, unsigned int val);
tsk               321 arch/powerpc/include/asm/processor.h #define GET_UNALIGN_CTL(tsk, adr)	get_unalign_ctl((tsk), (adr))
tsk               322 arch/powerpc/include/asm/processor.h #define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))
tsk               324 arch/powerpc/include/asm/processor.h extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
tsk               325 arch/powerpc/include/asm/processor.h extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
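
GET/SET_UNALIGN_CTL, GET/SET_FPEXC_CTL and GET/SET_ENDIAN above are the per-arch backends of prctl(2). From userspace the unaligned-access knob looks like the demo below; the calls fail with EINVAL on architectures that do not wire these macros up.

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>            /* PR_SET_UNALIGN, PR_UNALIGN_* */

    int main(void)
    {
        unsigned int mode = 0;

        if (prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT) != 0)
            perror("PR_SET_UNALIGN");   /* arch lacks SET_UNALIGN_CTL */

        if (prctl(PR_GET_UNALIGN, &mode) == 0)
            printf("unaligned-access control: %u\n", mode);
        else
            perror("PR_GET_UNALIGN");
        return 0;
    }
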
tsk                47 arch/powerpc/include/asm/task_size_64.h #define TASK_SIZE_OF(tsk)						\
tsk                48 arch/powerpc/include/asm/task_size_64.h 	(test_tsk_thread_flag(tsk, TIF_32BIT) ? TASK_SIZE_USER32 :	\
tsk                51 arch/powerpc/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk               185 arch/powerpc/kernel/hw_breakpoint.c void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
tsk               189 arch/powerpc/kernel/hw_breakpoint.c 	if (likely(!tsk->thread.last_hit_ubp))
tsk               192 arch/powerpc/kernel/hw_breakpoint.c 	info = counter_arch_bp(tsk->thread.last_hit_ubp);
tsk               195 arch/powerpc/kernel/hw_breakpoint.c 	tsk->thread.last_hit_ubp = NULL;
tsk               380 arch/powerpc/kernel/hw_breakpoint.c void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
tsk               382 arch/powerpc/kernel/hw_breakpoint.c 	struct thread_struct *t = &tsk->thread;
tsk                88 arch/powerpc/kernel/process.c static void check_if_tm_restore_required(struct task_struct *tsk)
tsk                96 arch/powerpc/kernel/process.c 	if (tsk == current && tsk->thread.regs &&
tsk                97 arch/powerpc/kernel/process.c 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
tsk                99 arch/powerpc/kernel/process.c 		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
tsk               105 arch/powerpc/kernel/process.c static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
tsk               159 arch/powerpc/kernel/process.c static void __giveup_fpu(struct task_struct *tsk)
tsk               163 arch/powerpc/kernel/process.c 	save_fpu(tsk);
tsk               164 arch/powerpc/kernel/process.c 	msr = tsk->thread.regs->msr;
tsk               170 arch/powerpc/kernel/process.c 	tsk->thread.regs->msr = msr;
tsk               173 arch/powerpc/kernel/process.c void giveup_fpu(struct task_struct *tsk)
tsk               175 arch/powerpc/kernel/process.c 	check_if_tm_restore_required(tsk);
tsk               178 arch/powerpc/kernel/process.c 	__giveup_fpu(tsk);
tsk               187 arch/powerpc/kernel/process.c void flush_fp_to_thread(struct task_struct *tsk)
tsk               189 arch/powerpc/kernel/process.c 	if (tsk->thread.regs) {
tsk               199 arch/powerpc/kernel/process.c 		if (tsk->thread.regs->msr & MSR_FP) {
tsk               207 arch/powerpc/kernel/process.c 			BUG_ON(tsk != current);
tsk               208 arch/powerpc/kernel/process.c 			giveup_fpu(tsk);
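
flush_fp_to_thread() above is one instance of the powerpc "giveup" pattern: if the live MSR says the FP unit still holds this task's registers, dump them into thread_struct and clear MSR_FP so the next FP instruction faults and reloads. A mocked sketch of that core move; the MSR_FP value and types are illustrative.

    #include <stdio.h>

    #define MSR_FP 0x2000UL                        /* illustrative bit */
    struct pt_regs { unsigned long msr; };
    struct task_struct {
        struct { struct pt_regs *regs; double fp_state[32]; } thread;
    };

    static void save_fpu(struct task_struct *tsk)  /* FPRs -> fp_state */
    { (void)tsk; }

    static void giveup_fpu_sketch(struct task_struct *tsk)
    {
        if (!tsk->thread.regs || !(tsk->thread.regs->msr & MSR_FP))
            return;                                /* nothing live to save */
        save_fpu(tsk);
        tsk->thread.regs->msr &= ~MSR_FP;          /* next FP use re-faults */
    }

    int main(void)
    {
        struct pt_regs r = { MSR_FP };
        struct task_struct t = { { &r, {0} } };
        giveup_fpu_sketch(&t);
        printf("msr=%#lx\n", r.msr);
        return 0;
    }
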
tsk               240 arch/powerpc/kernel/process.c static int restore_fp(struct task_struct *tsk)
tsk               242 arch/powerpc/kernel/process.c 	if (tsk->thread.load_fp) {
tsk               250 arch/powerpc/kernel/process.c static int restore_fp(struct task_struct *tsk) { return 0; }
tsk               256 arch/powerpc/kernel/process.c static void __giveup_altivec(struct task_struct *tsk)
tsk               260 arch/powerpc/kernel/process.c 	save_altivec(tsk);
tsk               261 arch/powerpc/kernel/process.c 	msr = tsk->thread.regs->msr;
tsk               267 arch/powerpc/kernel/process.c 	tsk->thread.regs->msr = msr;
tsk               270 arch/powerpc/kernel/process.c void giveup_altivec(struct task_struct *tsk)
tsk               272 arch/powerpc/kernel/process.c 	check_if_tm_restore_required(tsk);
tsk               275 arch/powerpc/kernel/process.c 	__giveup_altivec(tsk);
tsk               309 arch/powerpc/kernel/process.c void flush_altivec_to_thread(struct task_struct *tsk)
tsk               311 arch/powerpc/kernel/process.c 	if (tsk->thread.regs) {
tsk               313 arch/powerpc/kernel/process.c 		if (tsk->thread.regs->msr & MSR_VEC) {
tsk               314 arch/powerpc/kernel/process.c 			BUG_ON(tsk != current);
tsk               315 arch/powerpc/kernel/process.c 			giveup_altivec(tsk);
tsk               322 arch/powerpc/kernel/process.c static int restore_altivec(struct task_struct *tsk)
tsk               324 arch/powerpc/kernel/process.c 	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
tsk               325 arch/powerpc/kernel/process.c 		load_vr_state(&tsk->thread.vr_state);
tsk               326 arch/powerpc/kernel/process.c 		tsk->thread.used_vr = 1;
tsk               327 arch/powerpc/kernel/process.c 		tsk->thread.load_vec++;
tsk               335 arch/powerpc/kernel/process.c static inline int restore_altivec(struct task_struct *tsk) { return 0; }
tsk               339 arch/powerpc/kernel/process.c static void __giveup_vsx(struct task_struct *tsk)
tsk               341 arch/powerpc/kernel/process.c 	unsigned long msr = tsk->thread.regs->msr;
tsk               351 arch/powerpc/kernel/process.c 		__giveup_fpu(tsk);
tsk               353 arch/powerpc/kernel/process.c 		__giveup_altivec(tsk);
tsk               356 arch/powerpc/kernel/process.c static void giveup_vsx(struct task_struct *tsk)
tsk               358 arch/powerpc/kernel/process.c 	check_if_tm_restore_required(tsk);
tsk               361 arch/powerpc/kernel/process.c 	__giveup_vsx(tsk);
tsk               391 arch/powerpc/kernel/process.c void flush_vsx_to_thread(struct task_struct *tsk)
tsk               393 arch/powerpc/kernel/process.c 	if (tsk->thread.regs) {
tsk               395 arch/powerpc/kernel/process.c 		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
tsk               396 arch/powerpc/kernel/process.c 			BUG_ON(tsk != current);
tsk               397 arch/powerpc/kernel/process.c 			giveup_vsx(tsk);
tsk               404 arch/powerpc/kernel/process.c static int restore_vsx(struct task_struct *tsk)
tsk               407 arch/powerpc/kernel/process.c 		tsk->thread.used_vsr = 1;
tsk               414 arch/powerpc/kernel/process.c static inline int restore_vsx(struct task_struct *tsk) { return 0; }
tsk               418 arch/powerpc/kernel/process.c void giveup_spe(struct task_struct *tsk)
tsk               420 arch/powerpc/kernel/process.c 	check_if_tm_restore_required(tsk);
tsk               423 arch/powerpc/kernel/process.c 	__giveup_spe(tsk);
tsk               441 arch/powerpc/kernel/process.c void flush_spe_to_thread(struct task_struct *tsk)
tsk               443 arch/powerpc/kernel/process.c 	if (tsk->thread.regs) {
tsk               445 arch/powerpc/kernel/process.c 		if (tsk->thread.regs->msr & MSR_SPE) {
tsk               446 arch/powerpc/kernel/process.c 			BUG_ON(tsk != current);
tsk               447 arch/powerpc/kernel/process.c 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
tsk               448 arch/powerpc/kernel/process.c 			giveup_spe(tsk);
tsk               479 arch/powerpc/kernel/process.c void giveup_all(struct task_struct *tsk)
tsk               483 arch/powerpc/kernel/process.c 	if (!tsk->thread.regs)
tsk               486 arch/powerpc/kernel/process.c 	check_if_tm_restore_required(tsk);
tsk               488 arch/powerpc/kernel/process.c 	usermsr = tsk->thread.regs->msr;
tsk               499 arch/powerpc/kernel/process.c 		__giveup_fpu(tsk);
tsk               503 arch/powerpc/kernel/process.c 		__giveup_altivec(tsk);
tsk               507 arch/powerpc/kernel/process.c 		__giveup_spe(tsk);
tsk               555 arch/powerpc/kernel/process.c static void save_all(struct task_struct *tsk)
tsk               559 arch/powerpc/kernel/process.c 	if (!tsk->thread.regs)
tsk               562 arch/powerpc/kernel/process.c 	usermsr = tsk->thread.regs->msr;
tsk               572 arch/powerpc/kernel/process.c 		save_fpu(tsk);
tsk               575 arch/powerpc/kernel/process.c 		save_altivec(tsk);
tsk               578 arch/powerpc/kernel/process.c 		__giveup_spe(tsk);
tsk               581 arch/powerpc/kernel/process.c 	thread_pkey_regs_save(&tsk->thread);
tsk               584 arch/powerpc/kernel/process.c void flush_all_to_thread(struct task_struct *tsk)
tsk               586 arch/powerpc/kernel/process.c 	if (tsk->thread.regs) {
tsk               588 arch/powerpc/kernel/process.c 		BUG_ON(tsk != current);
tsk               590 arch/powerpc/kernel/process.c 		if (tsk->thread.regs->msr & MSR_SPE)
tsk               591 arch/powerpc/kernel/process.c 			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
tsk               593 arch/powerpc/kernel/process.c 		save_all(tsk);
tsk               824 arch/powerpc/kernel/process.c static inline bool tm_enabled(struct task_struct *tsk)
tsk               826 arch/powerpc/kernel/process.c 	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
tsk               879 arch/powerpc/kernel/process.c static inline void tm_reclaim_task(struct task_struct *tsk)
tsk               891 arch/powerpc/kernel/process.c 	struct thread_struct *thr = &tsk->thread;
tsk               903 arch/powerpc/kernel/process.c 		 tsk->pid, thr->regs->nip,
tsk               910 arch/powerpc/kernel/process.c 		 tsk->pid);
tsk              1821 arch/powerpc/kernel/process.c int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
tsk              1823 arch/powerpc/kernel/process.c 	struct pt_regs *regs = tsk->thread.regs;
tsk              1844 arch/powerpc/kernel/process.c 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
tsk              1845 arch/powerpc/kernel/process.c 			tsk->thread.fpexc_mode = val &
tsk              1863 arch/powerpc/kernel/process.c 	tsk->thread.fpexc_mode = __pack_fe01(val);
tsk              1866 arch/powerpc/kernel/process.c 			| tsk->thread.fpexc_mode;
tsk              1870 arch/powerpc/kernel/process.c int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
tsk              1874 arch/powerpc/kernel/process.c 	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
tsk              1889 arch/powerpc/kernel/process.c 			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
tsk              1890 arch/powerpc/kernel/process.c 			val = tsk->thread.fpexc_mode;
tsk              1897 arch/powerpc/kernel/process.c 		val = __unpack_fe01(tsk->thread.fpexc_mode);
tsk              1901 arch/powerpc/kernel/process.c int set_endian(struct task_struct *tsk, unsigned int val)
tsk              1903 arch/powerpc/kernel/process.c 	struct pt_regs *regs = tsk->thread.regs;
tsk              1922 arch/powerpc/kernel/process.c int get_endian(struct task_struct *tsk, unsigned long adr)
tsk              1924 arch/powerpc/kernel/process.c 	struct pt_regs *regs = tsk->thread.regs;
tsk              1945 arch/powerpc/kernel/process.c int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
tsk              1947 arch/powerpc/kernel/process.c 	tsk->thread.align_ctl = val;
tsk              1951 arch/powerpc/kernel/process.c int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
tsk              1953 arch/powerpc/kernel/process.c 	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
tsk              2031 arch/powerpc/kernel/process.c void show_stack(struct task_struct *tsk, unsigned long *stack)
tsk              2041 arch/powerpc/kernel/process.c 	if (tsk == NULL)
tsk              2042 arch/powerpc/kernel/process.c 		tsk = current;
tsk              2044 arch/powerpc/kernel/process.c 	if (!try_get_task_stack(tsk))
tsk              2049 arch/powerpc/kernel/process.c 		if (tsk == current)
tsk              2052 arch/powerpc/kernel/process.c 			sp = tsk->thread.ksp;
tsk              2058 arch/powerpc/kernel/process.c 		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
tsk              2082 arch/powerpc/kernel/process.c 		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
tsk              2095 arch/powerpc/kernel/process.c 	put_task_stack(tsk);
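
show_stack() above follows the powerpc back-chain: word 0 of each frame points at the caller's frame and a fixed slot holds the saved LR, with validate_sp() bounding the walk. A self-contained toy of that walk over a fabricated chain; the slot offsets are illustrative, not the exact ABI constants.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t stack[12] = {0};
        /* word 0 = back-chain, word 2 = saved return address */
        stack[0] = (uintptr_t)&stack[4];  stack[2]  = 0x1000;
        stack[4] = (uintptr_t)&stack[8];  stack[6]  = 0x2000;
        stack[8] = 0;                     stack[10] = 0x3000;

        for (uintptr_t sp = (uintptr_t)stack; sp; ) {
            uintptr_t *frame = (uintptr_t *)sp;
            printf("sp=%#lx  lr=%#lx\n", (unsigned long)sp,
                   (unsigned long)frame[2]);
            sp = frame[0];                /* 0 terminates the chain */
        }
        return 0;
    }
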
tsk               128 arch/powerpc/kernel/ptrace.c static void flush_tmregs_to_thread(struct task_struct *tsk)
tsk               138 arch/powerpc/kernel/ptrace.c 	if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
tsk               145 arch/powerpc/kernel/ptrace.c 		tm_save_sprs(&(tsk->thread));
tsk               149 arch/powerpc/kernel/ptrace.c static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
tsk               104 arch/powerpc/kernel/signal.c static void do_signal(struct task_struct *tsk)
tsk               111 arch/powerpc/kernel/signal.c 	BUG_ON(tsk != current);
tsk               116 arch/powerpc/kernel/signal.c 	check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);
tsk               121 arch/powerpc/kernel/signal.c 		tsk->thread.regs->trap = 0;
tsk               131 arch/powerpc/kernel/signal.c 	if (tsk->thread.hw_brk.address && tsk->thread.hw_brk.type)
tsk               132 arch/powerpc/kernel/signal.c 		__set_breakpoint(&tsk->thread.hw_brk);
tsk               135 arch/powerpc/kernel/signal.c 	thread_change_pc(tsk, tsk->thread.regs);
tsk               137 arch/powerpc/kernel/signal.c 	rseq_signal_deliver(&ksig, tsk->thread.regs);
tsk               141 arch/powerpc/kernel/signal.c 			ret = handle_rt_signal32(&ksig, oldset, tsk);
tsk               143 arch/powerpc/kernel/signal.c 			ret = handle_signal32(&ksig, oldset, tsk);
tsk               145 arch/powerpc/kernel/signal.c 		ret = handle_rt_signal64(&ksig, oldset, tsk);
tsk               148 arch/powerpc/kernel/signal.c 	tsk->thread.regs->trap = 0;
tsk               179 arch/powerpc/kernel/signal.c unsigned long get_tm_stackpointer(struct task_struct *tsk)
tsk               203 arch/powerpc/kernel/signal.c 	unsigned long ret = tsk->thread.regs->gpr[1];
tsk               206 arch/powerpc/kernel/signal.c 	BUG_ON(tsk != current);
tsk               208 arch/powerpc/kernel/signal.c 	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
tsk               211 arch/powerpc/kernel/signal.c 		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
tsk               212 arch/powerpc/kernel/signal.c 			ret = tsk->thread.ckpt_regs.gpr[1];
tsk               221 arch/powerpc/kernel/signal.c 		tsk->thread.regs->msr &= ~MSR_TS_MASK;
tsk                19 arch/powerpc/kernel/signal.h 			   struct task_struct *tsk);
tsk                22 arch/powerpc/kernel/signal.h 			      struct task_struct *tsk);
tsk                32 arch/powerpc/kernel/signal.h extern unsigned long get_tm_stackpointer(struct task_struct *tsk);
tsk                48 arch/powerpc/kernel/signal.h 			      struct task_struct *tsk);
tsk                56 arch/powerpc/kernel/signal.h 				     struct task_struct *tsk)
tsk               897 arch/powerpc/kernel/signal_32.c 		       struct task_struct *tsk)
tsk               906 arch/powerpc/kernel/signal_32.c 	struct pt_regs *regs = tsk->thread.regs;
tsk               912 arch/powerpc/kernel/signal_32.c 	BUG_ON(tsk != current);
tsk               916 arch/powerpc/kernel/signal_32.c 	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
tsk               933 arch/powerpc/kernel/signal_32.c 	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
tsk               935 arch/powerpc/kernel/signal_32.c 		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
tsk               962 arch/powerpc/kernel/signal_32.c 	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
tsk               987 arch/powerpc/kernel/signal_32.c 				   tsk->comm, tsk->pid,
tsk              1359 arch/powerpc/kernel/signal_32.c 		struct task_struct *tsk)
tsk              1367 arch/powerpc/kernel/signal_32.c 	struct pt_regs *regs = tsk->thread.regs;
tsk              1373 arch/powerpc/kernel/signal_32.c 	BUG_ON(tsk != current);
tsk              1376 arch/powerpc/kernel/signal_32.c 	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
tsk              1395 arch/powerpc/kernel/signal_32.c 	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
tsk              1397 arch/powerpc/kernel/signal_32.c 		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
tsk              1419 arch/powerpc/kernel/signal_32.c 	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
tsk              1439 arch/powerpc/kernel/signal_32.c 				   tsk->comm, tsk->pid,
tsk                92 arch/powerpc/kernel/signal_64.c 		struct task_struct *tsk, int signr, sigset_t *set,
tsk               107 arch/powerpc/kernel/signal_64.c 	struct pt_regs *regs = tsk->thread.regs;
tsk               113 arch/powerpc/kernel/signal_64.c 	BUG_ON(tsk != current);
tsk               119 arch/powerpc/kernel/signal_64.c 	if (tsk->thread.used_vr) {
tsk               120 arch/powerpc/kernel/signal_64.c 		flush_altivec_to_thread(tsk);
tsk               122 arch/powerpc/kernel/signal_64.c 		err |= __copy_to_user(v_regs, &tsk->thread.vr_state,
tsk               135 arch/powerpc/kernel/signal_64.c 		tsk->thread.vrsave = vrsave;
tsk               142 arch/powerpc/kernel/signal_64.c 	flush_fp_to_thread(tsk);
tsk               144 arch/powerpc/kernel/signal_64.c 	err |= copy_fpr_to_user(&sc->fp_regs, tsk);
tsk               157 arch/powerpc/kernel/signal_64.c 	if (tsk->thread.used_vsr && ctx_has_vsx_region) {
tsk               158 arch/powerpc/kernel/signal_64.c 		flush_vsx_to_thread(tsk);
tsk               160 arch/powerpc/kernel/signal_64.c 		err |= copy_vsx_to_user(v_regs, tsk);
tsk               194 arch/powerpc/kernel/signal_64.c 				 struct task_struct *tsk,
tsk               210 arch/powerpc/kernel/signal_64.c 	struct pt_regs *regs = tsk->thread.regs;
tsk               213 arch/powerpc/kernel/signal_64.c 	BUG_ON(tsk != current);
tsk               223 arch/powerpc/kernel/signal_64.c 	msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
tsk               230 arch/powerpc/kernel/signal_64.c 	if (tsk->thread.used_vr) {
tsk               232 arch/powerpc/kernel/signal_64.c 		err |= __copy_to_user(v_regs, &tsk->thread.ckvr_state,
tsk               239 arch/powerpc/kernel/signal_64.c 					      &tsk->thread.vr_state,
tsk               243 arch/powerpc/kernel/signal_64.c 					      &tsk->thread.ckvr_state,
tsk               255 arch/powerpc/kernel/signal_64.c 		tsk->thread.ckvrsave = mfspr(SPRN_VRSAVE);
tsk               256 arch/powerpc/kernel/signal_64.c 	err |= __put_user(tsk->thread.ckvrsave, (u32 __user *)&v_regs[33]);
tsk               258 arch/powerpc/kernel/signal_64.c 		err |= __put_user(tsk->thread.vrsave,
tsk               261 arch/powerpc/kernel/signal_64.c 		err |= __put_user(tsk->thread.ckvrsave,
tsk               270 arch/powerpc/kernel/signal_64.c 	err |= copy_ckfpr_to_user(&sc->fp_regs, tsk);
tsk               272 arch/powerpc/kernel/signal_64.c 		err |= copy_fpr_to_user(&tm_sc->fp_regs, tsk);
tsk               274 arch/powerpc/kernel/signal_64.c 		err |= copy_ckfpr_to_user(&tm_sc->fp_regs, tsk);
tsk               282 arch/powerpc/kernel/signal_64.c 	if (tsk->thread.used_vsr) {
tsk               286 arch/powerpc/kernel/signal_64.c 		err |= copy_ckvsx_to_user(v_regs, tsk);
tsk               289 arch/powerpc/kernel/signal_64.c 			err |= copy_vsx_to_user(tm_v_regs, tsk);
tsk               291 arch/powerpc/kernel/signal_64.c 			err |= copy_ckvsx_to_user(tm_v_regs, tsk);
tsk               305 arch/powerpc/kernel/signal_64.c 			      &tsk->thread.ckpt_regs, GP_REGS_SIZE);
tsk               321 arch/powerpc/kernel/signal_64.c static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
tsk               330 arch/powerpc/kernel/signal_64.c 	struct pt_regs *regs = tsk->thread.regs;
tsk               335 arch/powerpc/kernel/signal_64.c 	BUG_ON(tsk != current);
tsk               379 arch/powerpc/kernel/signal_64.c 		err |= __copy_from_user(&tsk->thread.vr_state, v_regs,
tsk               381 arch/powerpc/kernel/signal_64.c 		tsk->thread.used_vr = true;
tsk               382 arch/powerpc/kernel/signal_64.c 	} else if (tsk->thread.used_vr) {
tsk               383 arch/powerpc/kernel/signal_64.c 		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
tsk               387 arch/powerpc/kernel/signal_64.c 		err |= __get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]);
tsk               389 arch/powerpc/kernel/signal_64.c 		tsk->thread.vrsave = 0;
tsk               391 arch/powerpc/kernel/signal_64.c 		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
tsk               394 arch/powerpc/kernel/signal_64.c 	err |= copy_fpr_from_user(tsk, &sc->fp_regs);
tsk               403 arch/powerpc/kernel/signal_64.c 		err |= copy_vsx_from_user(tsk, v_regs);
tsk               404 arch/powerpc/kernel/signal_64.c 		tsk->thread.used_vsr = true;
tsk               407 arch/powerpc/kernel/signal_64.c 			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
tsk               418 arch/powerpc/kernel/signal_64.c static long restore_tm_sigcontexts(struct task_struct *tsk,
tsk               427 arch/powerpc/kernel/signal_64.c 	struct pt_regs *regs = tsk->thread.regs;
tsk               432 arch/powerpc/kernel/signal_64.c 	BUG_ON(tsk != current);
tsk               439 arch/powerpc/kernel/signal_64.c 	err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
tsk               451 arch/powerpc/kernel/signal_64.c 	err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);
tsk               467 arch/powerpc/kernel/signal_64.c 	err |= __get_user(tsk->thread.ckpt_regs.ctr,
tsk               469 arch/powerpc/kernel/signal_64.c 	err |= __get_user(tsk->thread.ckpt_regs.link,
tsk               471 arch/powerpc/kernel/signal_64.c 	err |= __get_user(tsk->thread.ckpt_regs.xer,
tsk               473 arch/powerpc/kernel/signal_64.c 	err |= __get_user(tsk->thread.ckpt_regs.ccr,
tsk               502 arch/powerpc/kernel/signal_64.c 		err |= __copy_from_user(&tsk->thread.ckvr_state, v_regs,
tsk               504 arch/powerpc/kernel/signal_64.c 		err |= __copy_from_user(&tsk->thread.vr_state, tm_v_regs,
tsk               508 arch/powerpc/kernel/signal_64.c 	else if (tsk->thread.used_vr) {
tsk               509 arch/powerpc/kernel/signal_64.c 		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
tsk               510 arch/powerpc/kernel/signal_64.c 		memset(&tsk->thread.ckvr_state, 0, 33 * sizeof(vector128));
tsk               514 arch/powerpc/kernel/signal_64.c 		err |= __get_user(tsk->thread.ckvrsave,
tsk               516 arch/powerpc/kernel/signal_64.c 		err |= __get_user(tsk->thread.vrsave,
tsk               520 arch/powerpc/kernel/signal_64.c 		tsk->thread.vrsave = 0;
tsk               521 arch/powerpc/kernel/signal_64.c 		tsk->thread.ckvrsave = 0;
tsk               524 arch/powerpc/kernel/signal_64.c 		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
tsk               527 arch/powerpc/kernel/signal_64.c 	err |= copy_fpr_from_user(tsk, &tm_sc->fp_regs);
tsk               528 arch/powerpc/kernel/signal_64.c 	err |= copy_ckfpr_from_user(tsk, &sc->fp_regs);
tsk               538 arch/powerpc/kernel/signal_64.c 		err |= copy_vsx_from_user(tsk, tm_v_regs);
tsk               539 arch/powerpc/kernel/signal_64.c 		err |= copy_ckvsx_from_user(tsk, v_regs);
tsk               540 arch/powerpc/kernel/signal_64.c 		tsk->thread.used_vsr = true;
tsk               543 arch/powerpc/kernel/signal_64.c 			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
tsk               544 arch/powerpc/kernel/signal_64.c 			tsk->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
tsk               550 arch/powerpc/kernel/signal_64.c 	tsk->thread.tm_texasr |= TEXASR_FS;
tsk               580 arch/powerpc/kernel/signal_64.c 	tm_recheckpoint(&tsk->thread);
tsk               584 arch/powerpc/kernel/signal_64.c 		load_fp_state(&tsk->thread.fp_state);
tsk               585 arch/powerpc/kernel/signal_64.c 		regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
tsk               588 arch/powerpc/kernel/signal_64.c 		load_vr_state(&tsk->thread.vr_state);
tsk               816 arch/powerpc/kernel/signal_64.c 		struct task_struct *tsk)
tsk               821 arch/powerpc/kernel/signal_64.c 	struct pt_regs *regs = tsk->thread.regs;
tsk               827 arch/powerpc/kernel/signal_64.c 	BUG_ON(tsk != current);
tsk               829 arch/powerpc/kernel/signal_64.c 	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 0);
tsk               850 arch/powerpc/kernel/signal_64.c 					    tsk, ksig->sig, NULL,
tsk               857 arch/powerpc/kernel/signal_64.c 		err |= setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
tsk               866 arch/powerpc/kernel/signal_64.c 	tsk->thread.fp_state.fpscr = 0;
tsk               869 arch/powerpc/kernel/signal_64.c 	if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
tsk               870 arch/powerpc/kernel/signal_64.c 		regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
tsk               920 arch/powerpc/kernel/signal_64.c 				   tsk->comm, tsk->pid, "setup_rt_frame",
tsk                30 arch/powerpc/kernel/stacktrace.c 			struct task_struct *tsk, int savesched)
tsk                36 arch/powerpc/kernel/stacktrace.c 		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
tsk                66 arch/powerpc/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk                70 arch/powerpc/kernel/stacktrace.c 	if (!try_get_task_stack(tsk))
tsk                73 arch/powerpc/kernel/stacktrace.c 	if (tsk == current)
tsk                76 arch/powerpc/kernel/stacktrace.c 		sp = tsk->thread.ksp;
tsk                78 arch/powerpc/kernel/stacktrace.c 	save_context_stack(trace, sp, tsk, 0);
tsk                80 arch/powerpc/kernel/stacktrace.c 	put_task_stack(tsk);
tsk                98 arch/powerpc/kernel/stacktrace.c static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
tsk               103 arch/powerpc/kernel/stacktrace.c 	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
tsk               109 arch/powerpc/kernel/stacktrace.c 	if (!is_idle_task(tsk)) {
tsk               133 arch/powerpc/kernel/stacktrace.c 	if (tsk == current)
tsk               136 arch/powerpc/kernel/stacktrace.c 		sp = tsk->thread.ksp;
tsk               185 arch/powerpc/kernel/stacktrace.c 		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, stack);
tsk               205 arch/powerpc/kernel/stacktrace.c int save_stack_trace_tsk_reliable(struct task_struct *tsk,
tsk               214 arch/powerpc/kernel/stacktrace.c 	if (!try_get_task_stack(tsk))
tsk               217 arch/powerpc/kernel/stacktrace.c 	ret = __save_stack_trace_tsk_reliable(tsk, trace);
tsk               219 arch/powerpc/kernel/stacktrace.c 	put_task_stack(tsk);
tsk               321 arch/powerpc/kernel/time.c static unsigned long vtime_delta(struct task_struct *tsk,
tsk               326 arch/powerpc/kernel/time.c 	struct cpu_accounting_data *acct = get_accounting(tsk);
tsk               341 arch/powerpc/kernel/time.c void vtime_account_system(struct task_struct *tsk)
tsk               344 arch/powerpc/kernel/time.c 	struct cpu_accounting_data *acct = get_accounting(tsk);
tsk               346 arch/powerpc/kernel/time.c 	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
tsk               351 arch/powerpc/kernel/time.c 	if ((tsk->flags & PF_VCPU) && !irq_count()) {
tsk               371 arch/powerpc/kernel/time.c void vtime_account_idle(struct task_struct *tsk)
tsk               374 arch/powerpc/kernel/time.c 	struct cpu_accounting_data *acct = get_accounting(tsk);
tsk               376 arch/powerpc/kernel/time.c 	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
tsk               380 arch/powerpc/kernel/time.c static void vtime_flush_scaled(struct task_struct *tsk,
tsk               385 arch/powerpc/kernel/time.c 		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
tsk               387 arch/powerpc/kernel/time.c 		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
tsk               402 arch/powerpc/kernel/time.c void vtime_flush(struct task_struct *tsk)
tsk               404 arch/powerpc/kernel/time.c 	struct cpu_accounting_data *acct = get_accounting(tsk);
tsk               407 arch/powerpc/kernel/time.c 		account_user_time(tsk, cputime_to_nsecs(acct->utime));
tsk               410 arch/powerpc/kernel/time.c 		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
tsk               421 arch/powerpc/kernel/time.c 		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
tsk               425 arch/powerpc/kernel/time.c 		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
tsk               428 arch/powerpc/kernel/time.c 		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
tsk               431 arch/powerpc/kernel/time.c 	vtime_flush_scaled(tsk, acct);
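
The time.c entries show the powerpc vtime scheme: vtime_delta() banks raw time into a per-task cpu_accounting_data, and vtime_flush() folds the banked totals into the scheduler's user/guest/system buckets when the task switches out. A compilable sketch with illustrative field names, not the real accounting structures.

    #include <stdio.h>

    struct cpu_accounting_data { unsigned long long utime, stime, gtime; };
    struct task {
        struct cpu_accounting_data acct;
        unsigned long long utime_ns, stime_ns, gtime_ns;
    };

    static void vtime_flush_sketch(struct task *tsk)
    {
        struct cpu_accounting_data *acct = &tsk->acct;
        if (acct->utime) tsk->utime_ns += acct->utime; /* account_user_time() */
        if (acct->gtime) tsk->gtime_ns += acct->gtime; /* account_guest_time() */
        if (acct->stime) tsk->stime_ns += acct->stime; /* system bucket */
        acct->utime = acct->stime = acct->gtime = 0;   /* deltas consumed */
    }

    int main(void)
    {
        struct task t = { .acct = { 100, 40, 0 } };
        vtime_flush_sketch(&t);
        printf("user=%llu sys=%llu\n", t.utime_ns, t.stime_ns);
        return 0;
    }
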
tsk              1251 arch/powerpc/kvm/book3s_hv.c 				 struct task_struct *tsk)
tsk               182 arch/powerpc/mm/book3s64/mmu_context.c int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk               243 arch/powerpc/mm/book3s64/pkeys.c int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
tsk               405 arch/powerpc/mm/book3s64/slb.c void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
tsk               407 arch/powerpc/mm/book3s64/slb.c 	struct thread_info *ti = task_thread_info(tsk);
tsk               483 arch/powerpc/mm/book3s64/slb.c 	tsk->thread.load_slb++;
tsk               484 arch/powerpc/mm/book3s64/slb.c 	if (!tsk->thread.load_slb) {
tsk               485 arch/powerpc/mm/book3s64/slb.c 		unsigned long pc = KSTK_EIP(tsk);
tsk                16 arch/powerpc/mm/mmu_context.c static inline void switch_mm_pgdir(struct task_struct *tsk,
tsk                20 arch/powerpc/mm/mmu_context.c 	tsk->thread.pgdir = mm->pgd;
tsk                23 arch/powerpc/mm/mmu_context.c static inline void switch_mm_pgdir(struct task_struct *tsk,
tsk                30 arch/powerpc/mm/mmu_context.c static inline void switch_mm_pgdir(struct task_struct *tsk,
tsk                35 arch/powerpc/mm/mmu_context.c 			struct task_struct *tsk)
tsk                71 arch/powerpc/mm/mmu_context.c 	switch_mm_pgdir(tsk, next);
tsk                87 arch/powerpc/mm/mmu_context.c 		membarrier_arch_switch_mm(prev, next, tsk);
tsk                93 arch/powerpc/mm/mmu_context.c 	switch_mmu_context(prev, next, tsk);
tsk               262 arch/powerpc/mm/nohash/mmu_context.c 			struct task_struct *tsk)
tsk                15 arch/powerpc/platforms/powernv/vas-trace.h 		TP_PROTO(struct task_struct *tsk,
tsk                20 arch/powerpc/platforms/powernv/vas-trace.h 		TP_ARGS(tsk, vasid, cop, rxattr),
tsk                23 arch/powerpc/platforms/powernv/vas-trace.h 			__field(struct task_struct *, tsk)
tsk                34 arch/powerpc/platforms/powernv/vas-trace.h 			__entry->pid = tsk->pid;
tsk                50 arch/powerpc/platforms/powernv/vas-trace.h 		TP_PROTO(struct task_struct *tsk,
tsk                55 arch/powerpc/platforms/powernv/vas-trace.h 		TP_ARGS(tsk, vasid, cop, txattr),
tsk                58 arch/powerpc/platforms/powernv/vas-trace.h 			__field(struct task_struct *, tsk)
tsk                68 arch/powerpc/platforms/powernv/vas-trace.h 			__entry->pid = tsk->pid;
tsk                82 arch/powerpc/platforms/powernv/vas-trace.h 		TP_PROTO(struct task_struct *tsk,
tsk                85 arch/powerpc/platforms/powernv/vas-trace.h 		TP_ARGS(tsk, win),
tsk                88 arch/powerpc/platforms/powernv/vas-trace.h 			__field(struct task_struct *, tsk)
tsk                97 arch/powerpc/platforms/powernv/vas-trace.h 			__entry->pid = tsk->pid;
tsk              3077 arch/powerpc/xmon/xmon.c static void show_task(struct task_struct *tsk)
tsk              3086 arch/powerpc/xmon/xmon.c 	state = (tsk->state == 0) ? 'R' :
tsk              3087 arch/powerpc/xmon/xmon.c 		(tsk->state < 0) ? 'U' :
tsk              3088 arch/powerpc/xmon/xmon.c 		(tsk->state & TASK_UNINTERRUPTIBLE) ? 'D' :
tsk              3089 arch/powerpc/xmon/xmon.c 		(tsk->state & TASK_STOPPED) ? 'T' :
tsk              3090 arch/powerpc/xmon/xmon.c 		(tsk->state & TASK_TRACED) ? 'C' :
tsk              3091 arch/powerpc/xmon/xmon.c 		(tsk->exit_state & EXIT_ZOMBIE) ? 'Z' :
tsk              3092 arch/powerpc/xmon/xmon.c 		(tsk->exit_state & EXIT_DEAD) ? 'E' :
tsk              3093 arch/powerpc/xmon/xmon.c 		(tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
tsk              3095 arch/powerpc/xmon/xmon.c 	printf("%px %016lx %6d %6d %c %2d %s\n", tsk,
tsk              3096 arch/powerpc/xmon/xmon.c 		tsk->thread.ksp,
tsk              3097 arch/powerpc/xmon/xmon.c 		tsk->pid, rcu_dereference(tsk->parent)->pid,
tsk              3098 arch/powerpc/xmon/xmon.c 		state, task_cpu(tsk),
tsk              3099 arch/powerpc/xmon/xmon.c 		tsk->comm);
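
xmon's show_task() above condenses tsk->state into a one-letter column, the same convention ps(1) uses. Userspace can read the equivalent letter from field 3 of /proc/<pid>/stat; a small Linux-only demo:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/self/stat", "r");
        int pid;
        char comm[64], state;

        if (f && fscanf(f, "%d (%63[^)]) %c", &pid, comm, &state) == 3)
            printf("%d %s state=%c\n", pid, comm, state);
        if (f)
            fclose(f);
        return 0;
    }
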
tsk              3121 arch/powerpc/xmon/xmon.c 	struct task_struct *tsk = NULL;
tsk              3131 arch/powerpc/xmon/xmon.c 		tsk = (struct task_struct *)tskv;
tsk              3133 arch/powerpc/xmon/xmon.c 	if (tsk == NULL)
tsk              3136 arch/powerpc/xmon/xmon.c 		mm = tsk->active_mm;
tsk              3140 arch/powerpc/xmon/xmon.c 		printf("*** Error dumping pte for task %px\n", tsk);
tsk              3217 arch/powerpc/xmon/xmon.c 	struct task_struct *tsk = NULL;
tsk              3222 arch/powerpc/xmon/xmon.c 		tsk = (struct task_struct *)tskv;
tsk              3226 arch/powerpc/xmon/xmon.c 		printf("*** Error dumping task %px\n", tsk);
tsk              3233 arch/powerpc/xmon/xmon.c 	if (tsk)
tsk              3234 arch/powerpc/xmon/xmon.c 		show_task(tsk);
tsk              3236 arch/powerpc/xmon/xmon.c 		for_each_process(tsk)
tsk              3237 arch/powerpc/xmon/xmon.c 			show_task(tsk);
tsk                41 arch/riscv/include/asm/processor.h #define task_pt_regs(tsk)						\
tsk                42 arch/riscv/include/asm/processor.h 	((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE		\
tsk                45 arch/riscv/include/asm/processor.h #define KSTK_EIP(tsk)		(task_pt_regs(tsk)->sepc)
tsk                46 arch/riscv/include/asm/processor.h #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->sp)
tsk                54 arch/riscv/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk               157 arch/riscv/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk               159 arch/riscv/kernel/stacktrace.c 	walk_stackframe(tsk, NULL, save_trace, trace);
tsk                61 arch/riscv/kernel/traps.c 	struct task_struct *tsk = current;
tsk                63 arch/riscv/kernel/traps.c 	if (show_unhandled_signals && unhandled_signal(tsk, signo)
tsk                66 arch/riscv/kernel/traps.c 			tsk->comm, task_pid_nr(tsk), signo, code, addr);
tsk                29 arch/riscv/mm/fault.c 	struct task_struct *tsk;
tsk                40 arch/riscv/mm/fault.c 	tsk = current;
tsk                41 arch/riscv/mm/fault.c 	mm = tsk->mm;
tsk               120 arch/riscv/mm/fault.c 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
tsk               138 arch/riscv/mm/fault.c 			tsk->maj_flt++;
tsk               142 arch/riscv/mm/fault.c 			tsk->min_flt++;
tsk                18 arch/s390/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *tsk,
tsk                89 arch/s390/include/asm/mmu_context.h 			     struct task_struct *tsk)
tsk               111 arch/s390/include/asm/mmu_context.h 	struct task_struct *tsk = current;
tsk               112 arch/s390/include/asm/mmu_context.h 	struct mm_struct *mm = tsk->mm;
tsk               125 arch/s390/include/asm/mmu_context.h #define enter_lazy_tlb(mm,tsk)	do { } while (0)
tsk               126 arch/s390/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk                95 arch/s390/include/asm/processor.h #define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_31BIT) ? \
tsk               196 arch/s390/include/asm/processor.h static inline void release_thread(struct task_struct *tsk) { }
tsk               199 arch/s390/include/asm/processor.h void guarded_storage_release(struct task_struct *tsk);
tsk               202 arch/s390/include/asm/processor.h #define task_pt_regs(tsk) ((struct pt_regs *) \
tsk               203 arch/s390/include/asm/processor.h         (task_stack_page(tsk) + THREAD_SIZE) - 1)
tsk               204 arch/s390/include/asm/processor.h #define KSTK_EIP(tsk)	(task_pt_regs(tsk)->psw.addr)
tsk               205 arch/s390/include/asm/processor.h #define KSTK_ESP(tsk)	(task_pt_regs(tsk)->gprs[15])
tsk               208 arch/s390/include/asm/processor.h #define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
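
The s390 task_pt_regs() above relies on a layout convention: the user-mode pt_regs frame sits at the very top of the task's kernel stack, so KSTK_EIP/KSTK_ESP are just field reads off that frame. A mocked illustration of the pointer arithmetic; sizes and fields are illustrative.

    #include <stdio.h>

    #define THREAD_SIZE 16384
    struct pt_regs { unsigned long psw_addr; unsigned long gprs[16]; };

    int main(void)
    {
        static unsigned char stack[THREAD_SIZE];   /* task_stack_page() */
        struct pt_regs *regs =
            (struct pt_regs *)(stack + THREAD_SIZE) - 1;

        regs->psw_addr = 0x4000;                   /* what KSTK_EIP reads */
        printf("pt_regs at %p, stack top %p\n",
               (void *)regs, (void *)(stack + THREAD_SIZE));
        return 0;
    }
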
tsk                26 arch/s390/include/asm/runtime_instr.h void runtime_instr_release(struct task_struct *tsk);
tsk                45 arch/s390/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                50 arch/s390/include/asm/thread_info.h void arch_release_task_struct(struct task_struct *tsk);
tsk                15 arch/s390/kernel/guarded_storage.c void guarded_storage_release(struct task_struct *tsk)
tsk                17 arch/s390/kernel/guarded_storage.c 	kfree(tsk->thread.gs_cb);
tsk                18 arch/s390/kernel/guarded_storage.c 	kfree(tsk->thread.gs_bc_cb);
tsk               631 arch/s390/kernel/perf_cpum_sf.c 	struct task_struct *tsk;
tsk               637 arch/s390/kernel/perf_cpum_sf.c 	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
tsk               639 arch/s390/kernel/perf_cpum_sf.c 	if (tsk) {
tsk               646 arch/s390/kernel/perf_cpum_sf.c 		pid = __task_pid_nr_ns(tsk, type, event->ns);
tsk               651 arch/s390/kernel/perf_cpum_sf.c 		if (!pid && !pid_alive(tsk))
tsk                62 arch/s390/kernel/process.c void arch_release_task_struct(struct task_struct *tsk)
tsk                64 arch/s390/kernel/process.c 	runtime_instr_release(tsk);
tsk                65 arch/s390/kernel/process.c 	guarded_storage_release(tsk);
tsk                26 arch/s390/kernel/runtime_instr.c void runtime_instr_release(struct task_struct *tsk)
tsk                28 arch/s390/kernel/runtime_instr.c 	kfree(tsk->thread.ri_cb);
tsk               282 arch/s390/kernel/smp.c static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
tsk               286 arch/s390/kernel/smp.c 	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
tsk               288 arch/s390/kernel/smp.c 	lc->current_task = (unsigned long) tsk;
tsk               290 arch/s390/kernel/smp.c 	lc->current_pid = tsk->pid;
tsk               291 arch/s390/kernel/smp.c 	lc->user_timer = tsk->thread.user_timer;
tsk               292 arch/s390/kernel/smp.c 	lc->guest_timer = tsk->thread.guest_timer;
tsk               293 arch/s390/kernel/smp.c 	lc->system_timer = tsk->thread.system_timer;
tsk               294 arch/s390/kernel/smp.c 	lc->hardirq_timer = tsk->thread.hardirq_timer;
tsk               295 arch/s390/kernel/smp.c 	lc->softirq_timer = tsk->thread.softirq_timer;
tsk                45 arch/s390/kernel/uprobes.c bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
tsk                47 arch/s390/kernel/uprobes.c 	struct pt_regs *regs = task_pt_regs(tsk);
tsk               125 arch/s390/kernel/vtime.c static int do_account_vtime(struct task_struct *tsk)
tsk               154 arch/s390/kernel/vtime.c 	user = update_tsk_timer(&tsk->thread.user_timer,
tsk               156 arch/s390/kernel/vtime.c 	guest = update_tsk_timer(&tsk->thread.guest_timer,
tsk               158 arch/s390/kernel/vtime.c 	system = update_tsk_timer(&tsk->thread.system_timer,
tsk               160 arch/s390/kernel/vtime.c 	hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
tsk               162 arch/s390/kernel/vtime.c 	softirq = update_tsk_timer(&tsk->thread.softirq_timer,
tsk               169 arch/s390/kernel/vtime.c 		account_user_time(tsk, cputime_to_nsecs(user));
tsk               170 arch/s390/kernel/vtime.c 		tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
tsk               174 arch/s390/kernel/vtime.c 		account_guest_time(tsk, cputime_to_nsecs(guest));
tsk               175 arch/s390/kernel/vtime.c 		tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest));
tsk               179 arch/s390/kernel/vtime.c 		account_system_index_scaled(tsk, system, CPUTIME_SYSTEM);
tsk               181 arch/s390/kernel/vtime.c 		account_system_index_scaled(tsk, hardirq, CPUTIME_IRQ);
tsk               183 arch/s390/kernel/vtime.c 		account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
tsk               208 arch/s390/kernel/vtime.c void vtime_flush(struct task_struct *tsk)
tsk               212 arch/s390/kernel/vtime.c 	if (do_account_vtime(tsk))
tsk               229 arch/s390/kernel/vtime.c void vtime_account_irq_enter(struct task_struct *tsk)
tsk               237 arch/s390/kernel/vtime.c 	if ((tsk->flags & PF_VCPU) && (irq_count() == 0))
tsk               250 arch/s390/kernel/vtime.c void vtime_account_system(struct task_struct *tsk)
tsk               388 arch/s390/mm/fault.c 	struct task_struct *tsk;
tsk               397 arch/s390/mm/fault.c 	tsk = current;
tsk               407 arch/s390/mm/fault.c 	mm = tsk->mm;
tsk               500 arch/s390/mm/fault.c 			tsk->maj_flt++;
tsk               504 arch/s390/mm/fault.c 			tsk->min_flt++;
tsk               697 arch/s390/mm/fault.c 	struct task_struct *tsk;
tsk               713 arch/s390/mm/fault.c 	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
tsk               714 arch/s390/mm/fault.c 	if (tsk)
tsk               715 arch/s390/mm/fault.c 		get_task_struct(tsk);
tsk               717 arch/s390/mm/fault.c 	if (!tsk)
tsk               722 arch/s390/mm/fault.c 		if (tsk->thread.pfault_wait == 1) {
tsk               728 arch/s390/mm/fault.c 			tsk->thread.pfault_wait = 0;
tsk               729 arch/s390/mm/fault.c 			list_del(&tsk->thread.list);
tsk               730 arch/s390/mm/fault.c 			wake_up_process(tsk);
tsk               731 arch/s390/mm/fault.c 			put_task_struct(tsk);
tsk               740 arch/s390/mm/fault.c 			if (tsk->state == TASK_RUNNING)
tsk               741 arch/s390/mm/fault.c 				tsk->thread.pfault_wait = -1;
tsk               745 arch/s390/mm/fault.c 		if (WARN_ON_ONCE(tsk != current))
tsk               747 arch/s390/mm/fault.c 		if (tsk->thread.pfault_wait == 1) {
tsk               750 arch/s390/mm/fault.c 		} else if (tsk->thread.pfault_wait == -1) {
tsk               754 arch/s390/mm/fault.c 			tsk->thread.pfault_wait = 0;
tsk               761 arch/s390/mm/fault.c 			get_task_struct(tsk);
tsk               762 arch/s390/mm/fault.c 			tsk->thread.pfault_wait = 1;
tsk               763 arch/s390/mm/fault.c 			list_add(&tsk->thread.list, &pfault_list);
tsk               769 arch/s390/mm/fault.c 			set_tsk_need_resched(tsk);
tsk               775 arch/s390/mm/fault.c 	put_task_struct(tsk);
tsk               781 arch/s390/mm/fault.c 	struct task_struct *tsk;
tsk               787 arch/s390/mm/fault.c 		tsk = container_of(thread, struct task_struct, thread);
tsk               788 arch/s390/mm/fault.c 		wake_up_process(tsk);
tsk               789 arch/s390/mm/fault.c 		put_task_struct(tsk);
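
The pfault entries above implement a two-interrupt handshake: the "init" interrupt parks the faulting task on pfault_list (pfault_wait = 1), and the matching "completion" interrupt unlinks and wakes it, with the -1 state covering a completion that beats its init. A single-threaded mock of the list discipline; locking and wake_up_process() are omitted.

    #include <stdio.h>
    #include <stddef.h>

    struct task {
        int pid;
        int pfault_wait;          /* 0 idle, 1 parked, -1 early completion */
        struct task *next;
    };
    static struct task *pfault_list;

    static void pfault_init(struct task *tsk)      /* fault handed to host */
    {
        tsk->pfault_wait = 1;
        tsk->next = pfault_list;
        pfault_list = tsk;                         /* park until completion */
    }

    static void pfault_complete(struct task *tsk)  /* host resolved the page */
    {
        struct task **p = &pfault_list;
        while (*p && *p != tsk)
            p = &(*p)->next;
        if (*p) {
            *p = tsk->next;
            tsk->pfault_wait = 0;
            printf("wake pid %d\n", tsk->pid);     /* wake_up_process() */
        } else {
            tsk->pfault_wait = -1;                 /* completion came first */
        }
    }

    int main(void)
    {
        struct task t = { 42, 0, NULL };
        pfault_init(&t);
        pfault_complete(&t);
        return 0;
    }
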
tsk                27 arch/sh/include/asm/fpu.h #define save_fpu(tsk)			do { } while (0)
tsk                28 arch/sh/include/asm/fpu.h #define restore_fpu(tsk)		do { } while (0)
tsk                45 arch/sh/include/asm/fpu.h static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
tsk                47 arch/sh/include/asm/fpu.h 	if (task_thread_info(tsk)->status & TS_USEDFPU) {
tsk                48 arch/sh/include/asm/fpu.h 		task_thread_info(tsk)->status &= ~TS_USEDFPU;
tsk                49 arch/sh/include/asm/fpu.h 		save_fpu(tsk);
tsk                52 arch/sh/include/asm/fpu.h 		tsk->thread.fpu_counter = 0;
tsk                55 arch/sh/include/asm/fpu.h static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
tsk                58 arch/sh/include/asm/fpu.h 	__unlazy_fpu(tsk, regs);
tsk                62 arch/sh/include/asm/fpu.h static inline void clear_fpu(struct task_struct *tsk, struct pt_regs *regs)
tsk                65 arch/sh/include/asm/fpu.h 	if (task_thread_info(tsk)->status & TS_USEDFPU) {
tsk                66 arch/sh/include/asm/fpu.h 		task_thread_info(tsk)->status &= ~TS_USEDFPU;
tsk               100 arch/sh/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *tsk,
tsk               123 arch/sh/include/asm/mmu_context.h 			     struct task_struct *tsk)
tsk               137 arch/sh/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)		do { } while (0)
tsk               138 arch/sh/include/asm/mmu_context.h #define enter_lazy_tlb(mm,tsk)		do { } while (0)
tsk               123 arch/sh/include/asm/processor.h #define GET_UNALIGN_CTL(tsk, addr)	get_unalign_ctl((tsk), (addr))
tsk               124 arch/sh/include/asm/processor.h #define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))
tsk               171 arch/sh/include/asm/processor_32.h #define thread_saved_pc(tsk)	(tsk->thread.pc)
tsk               173 arch/sh/include/asm/processor_32.h void show_trace(struct task_struct *tsk, unsigned long *sp,
tsk               186 arch/sh/include/asm/processor_32.h #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
tsk               187 arch/sh/include/asm/processor_32.h #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
tsk               204 arch/sh/include/asm/processor_64.h #define thread_saved_pc(tsk)	(tsk->thread.pc)
tsk               208 arch/sh/include/asm/processor_64.h #define KSTK_EIP(tsk)  ((tsk)->thread.pc)
tsk               209 arch/sh/include/asm/processor_64.h #define KSTK_ESP(tsk)  ((tsk)->thread.sp)
tsk                19 arch/sh/include/asm/stacktrace.h void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
tsk                 7 arch/sh/include/asm/switch_to_32.h #define is_dsp_enabled(tsk)						\
tsk                 8 arch/sh/include/asm/switch_to_32.h 	(!!(tsk->thread.dsp_status.status & SR_DSP))
tsk                10 arch/sh/include/asm/switch_to_32.h #define __restore_dsp(tsk)						\
tsk                13 arch/sh/include/asm/switch_to_32.h 			(u32 *)&tsk->thread.dsp_status;			\
tsk                33 arch/sh/include/asm/switch_to_32.h #define __save_dsp(tsk)							\
tsk                36 arch/sh/include/asm/switch_to_32.h 			(u32 *)&tsk->thread.dsp_status + 14;		\
tsk                59 arch/sh/include/asm/switch_to_32.h #define is_dsp_enabled(tsk)	(0)
tsk                60 arch/sh/include/asm/switch_to_32.h #define __save_dsp(tsk)		do { } while (0)
tsk                61 arch/sh/include/asm/switch_to_32.h #define __restore_dsp(tsk)	do { } while (0)
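
When CONFIG_SH_DSP is off, the DSP helpers above collapse to do { } while (0) stubs so call sites compile unchanged; that form also binds safely before an else, unlike a bare pair of braces. A tiny demonstration: because the macro body never uses its parameter, the argument token (here `current`) need not even be declared.

    #include <stdio.h>

    #define __save_dsp(tsk) do { } while (0)

    int main(void)
    {
        int have_dsp = 0;
        if (have_dsp)
            __save_dsp(current);   /* expands cleanly; else still binds */
        else
            puts("DSP state not saved");
        return 0;
    }
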
tsk                56 arch/sh/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                58 arch/sh/include/asm/thread_info.h 	.task		= &tsk,			\
tsk                95 arch/sh/include/asm/thread_info.h extern void arch_release_task_struct(struct task_struct *tsk);
tsk                11 arch/sh/kernel/cpu/fpu.c int init_fpu(struct task_struct *tsk)
tsk                13 arch/sh/kernel/cpu/fpu.c 	if (tsk_used_math(tsk)) {
tsk                14 arch/sh/kernel/cpu/fpu.c 		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
tsk                15 arch/sh/kernel/cpu/fpu.c 			unlazy_fpu(tsk, task_pt_regs(tsk));
tsk                22 arch/sh/kernel/cpu/fpu.c 	if (!tsk->thread.xstate) {
tsk                23 arch/sh/kernel/cpu/fpu.c 		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
tsk                25 arch/sh/kernel/cpu/fpu.c 		if (!tsk->thread.xstate)
tsk                30 arch/sh/kernel/cpu/fpu.c 		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
tsk                34 arch/sh/kernel/cpu/fpu.c 		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
tsk                39 arch/sh/kernel/cpu/fpu.c 	set_stopped_child_used_math(tsk);
tsk                46 arch/sh/kernel/cpu/fpu.c 	struct task_struct *tsk = current;
tsk                48 arch/sh/kernel/cpu/fpu.c 	restore_fpu(tsk);
tsk                50 arch/sh/kernel/cpu/fpu.c 	task_thread_info(tsk)->status |= TS_USEDFPU;
tsk                51 arch/sh/kernel/cpu/fpu.c 	tsk->thread.fpu_counter++;
tsk                56 arch/sh/kernel/cpu/fpu.c 	struct task_struct *tsk = current;
tsk                64 arch/sh/kernel/cpu/fpu.c 	if (!tsk_used_math(tsk)) {
tsk                69 arch/sh/kernel/cpu/fpu.c 		if (init_fpu(tsk)) {
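The fpu.c hits above show init_fpu() allocating tsk->thread.xstate from a kmem cache only when the task first touches the FPU. A hedged userspace sketch of that lazy-allocation shape; struct xstate and struct task here are stand-ins, not the kernel's types:

#include <stdlib.h>

struct xstate { unsigned int fpscr; unsigned int fp_regs[16]; };
struct task   { struct xstate *xstate; };

/* Allocate FPU state on first use; repeat calls are cheap no-ops. */
static int init_fpu(struct task *tsk)
{
	if (!tsk->xstate) {
		tsk->xstate = calloc(1, sizeof(*tsk->xstate));
		if (!tsk->xstate)
			return -1;	/* the kernel returns an errno here */
	}
	return 0;
}

int main(void)
{
	struct task t = { 0 };

	if (init_fpu(&t))
		return 1;
	t.xstate->fpscr = 0;	/* state exists from here on */
	free(t.xstate);
	return 0;
}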
tsk                27 arch/sh/kernel/cpu/sh2a/fpu.c void save_fpu(struct task_struct *tsk)
tsk                52 arch/sh/kernel/cpu/sh2a/fpu.c 		     : "0" ((char *)(&tsk->thread.xstate->hardfpu.status)),
tsk                60 arch/sh/kernel/cpu/sh2a/fpu.c void restore_fpu(struct task_struct *tsk)
tsk                84 arch/sh/kernel/cpu/sh2a/fpu.c 		     : "0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
tsk               454 arch/sh/kernel/cpu/sh2a/fpu.c 		struct task_struct *tsk = current;
tsk               456 arch/sh/kernel/cpu/sh2a/fpu.c 		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) {
tsk               458 arch/sh/kernel/cpu/sh2a/fpu.c 			denormal_to_double (&tsk->thread.xstate->hardfpu,
tsk               466 arch/sh/kernel/cpu/sh2a/fpu.c 		struct task_struct *tsk = current;
tsk               473 arch/sh/kernel/cpu/sh2a/fpu.c 		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
tsk               474 arch/sh/kernel/cpu/sh2a/fpu.c 		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
tsk               475 arch/sh/kernel/cpu/sh2a/fpu.c 		fpscr = tsk->thread.xstate->hardfpu.fpscr;
tsk               485 arch/sh/kernel/cpu/sh2a/fpu.c 			       | tsk->thread.xstate->hardfpu.fp_regs[n+1];
tsk               487 arch/sh/kernel/cpu/sh2a/fpu.c 			       | tsk->thread.xstate->hardfpu.fp_regs[m+1];
tsk               492 arch/sh/kernel/cpu/sh2a/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk               493 arch/sh/kernel/cpu/sh2a/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
tsk               502 arch/sh/kernel/cpu/sh2a/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
tsk               509 arch/sh/kernel/cpu/sh2a/fpu.c 		struct task_struct *tsk = current;
tsk               516 arch/sh/kernel/cpu/sh2a/fpu.c 		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
tsk               517 arch/sh/kernel/cpu/sh2a/fpu.c 		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
tsk               518 arch/sh/kernel/cpu/sh2a/fpu.c 		fpscr = tsk->thread.xstate->hardfpu.fpscr;
tsk               528 arch/sh/kernel/cpu/sh2a/fpu.c 			       | tsk->thread.xstate->hardfpu.fp_regs[n+1];
tsk               530 arch/sh/kernel/cpu/sh2a/fpu.c 			       | tsk->thread.xstate->hardfpu.fp_regs[m+1];
tsk               535 arch/sh/kernel/cpu/sh2a/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk               536 arch/sh/kernel/cpu/sh2a/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff;
tsk               545 arch/sh/kernel/cpu/sh2a/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
tsk               558 arch/sh/kernel/cpu/sh2a/fpu.c 	struct task_struct *tsk = current;
tsk               561 arch/sh/kernel/cpu/sh2a/fpu.c 	__unlazy_fpu(tsk, regs);
tsk               563 arch/sh/kernel/cpu/sh2a/fpu.c 		tsk->thread.xstate->hardfpu.fpscr &=
tsk               566 arch/sh/kernel/cpu/sh2a/fpu.c 		restore_fpu(tsk);
tsk               567 arch/sh/kernel/cpu/sh2a/fpu.c 		task_thread_info(tsk)->status |= TS_USEDFPU;
tsk                41 arch/sh/kernel/cpu/sh4/fpu.c void save_fpu(struct task_struct *tsk)
tsk                84 arch/sh/kernel/cpu/sh4/fpu.c 		      :"0"((char *)(&tsk->thread.xstate->hardfpu.status)),
tsk                91 arch/sh/kernel/cpu/sh4/fpu.c void restore_fpu(struct task_struct *tsk)
tsk               134 arch/sh/kernel/cpu/sh4/fpu.c 		      :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG)
tsk               228 arch/sh/kernel/cpu/sh4/fpu.c 		struct task_struct *tsk = current;
tsk               230 arch/sh/kernel/cpu/sh4/fpu.c 		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR))
tsk               232 arch/sh/kernel/cpu/sh4/fpu.c 			denormal_to_double(&tsk->thread.xstate->hardfpu,
tsk               241 arch/sh/kernel/cpu/sh4/fpu.c 		struct task_struct *tsk = current;
tsk               248 arch/sh/kernel/cpu/sh4/fpu.c 		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
tsk               249 arch/sh/kernel/cpu/sh4/fpu.c 		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
tsk               250 arch/sh/kernel/cpu/sh4/fpu.c 		fpscr = tsk->thread.xstate->hardfpu.fpscr;
tsk               260 arch/sh/kernel/cpu/sh4/fpu.c 			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
tsk               262 arch/sh/kernel/cpu/sh4/fpu.c 			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
tsk               264 arch/sh/kernel/cpu/sh4/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk               265 arch/sh/kernel/cpu/sh4/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
tsk               271 arch/sh/kernel/cpu/sh4/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
tsk               279 arch/sh/kernel/cpu/sh4/fpu.c 		struct task_struct *tsk = current;
tsk               286 arch/sh/kernel/cpu/sh4/fpu.c 		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
tsk               287 arch/sh/kernel/cpu/sh4/fpu.c 		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
tsk               288 arch/sh/kernel/cpu/sh4/fpu.c 		fpscr = tsk->thread.xstate->hardfpu.fpscr;
tsk               298 arch/sh/kernel/cpu/sh4/fpu.c 			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
tsk               300 arch/sh/kernel/cpu/sh4/fpu.c 			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
tsk               305 arch/sh/kernel/cpu/sh4/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk               306 arch/sh/kernel/cpu/sh4/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
tsk               315 arch/sh/kernel/cpu/sh4/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
tsk               323 arch/sh/kernel/cpu/sh4/fpu.c 		struct task_struct *tsk = current;
tsk               330 arch/sh/kernel/cpu/sh4/fpu.c 		hx = tsk->thread.xstate->hardfpu.fp_regs[n];
tsk               331 arch/sh/kernel/cpu/sh4/fpu.c 		hy = tsk->thread.xstate->hardfpu.fp_regs[m];
tsk               332 arch/sh/kernel/cpu/sh4/fpu.c 		fpscr = tsk->thread.xstate->hardfpu.fpscr;
tsk               342 arch/sh/kernel/cpu/sh4/fpu.c 			    | tsk->thread.xstate->hardfpu.fp_regs[n + 1];
tsk               344 arch/sh/kernel/cpu/sh4/fpu.c 			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
tsk               348 arch/sh/kernel/cpu/sh4/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32;
tsk               349 arch/sh/kernel/cpu/sh4/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff;
tsk               355 arch/sh/kernel/cpu/sh4/fpu.c 			tsk->thread.xstate->hardfpu.fp_regs[n] = hx;
tsk               363 arch/sh/kernel/cpu/sh4/fpu.c 		struct task_struct *tsk = current;
tsk               368 arch/sh/kernel/cpu/sh4/fpu.c 		hx = tsk->thread.xstate->hardfpu.fp_regs[m];
tsk               370 arch/sh/kernel/cpu/sh4/fpu.c 		if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)
tsk               375 arch/sh/kernel/cpu/sh4/fpu.c 			llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32)
tsk               376 arch/sh/kernel/cpu/sh4/fpu.c 			    | tsk->thread.xstate->hardfpu.fp_regs[m + 1];
tsk               378 arch/sh/kernel/cpu/sh4/fpu.c 			tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx);
tsk               396 arch/sh/kernel/cpu/sh4/fpu.c 	struct task_struct *tsk = current;
tsk               397 arch/sh/kernel/cpu/sh4/fpu.c 	int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr);
tsk               403 arch/sh/kernel/cpu/sh4/fpu.c 	struct task_struct *tsk = current;
tsk               406 arch/sh/kernel/cpu/sh4/fpu.c 	__unlazy_fpu(tsk, regs);
tsk               409 arch/sh/kernel/cpu/sh4/fpu.c 		tsk->thread.xstate->hardfpu.fpscr &=
tsk               411 arch/sh/kernel/cpu/sh4/fpu.c 		tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags;
tsk               414 arch/sh/kernel/cpu/sh4/fpu.c 		tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10);
tsk               416 arch/sh/kernel/cpu/sh4/fpu.c 		restore_fpu(tsk);
tsk               417 arch/sh/kernel/cpu/sh4/fpu.c 		task_thread_info(tsk)->status |= TS_USEDFPU;
tsk               418 arch/sh/kernel/cpu/sh4/fpu.c 		if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) &
tsk                16 arch/sh/kernel/cpu/sh5/fpu.c void save_fpu(struct task_struct *tsk)
tsk                54 arch/sh/kernel/cpu/sh5/fpu.c 		: "r" (&tsk->thread.xstate->hardfpu)
tsk                58 arch/sh/kernel/cpu/sh5/fpu.c void restore_fpu(struct task_struct *tsk)
tsk                97 arch/sh/kernel/cpu/sh5/fpu.c 		: "r" (&tsk->thread.xstate->hardfpu)
tsk               129 arch/sh/kernel/dumpstack.c void show_trace(struct task_struct *tsk, unsigned long *sp,
tsk               137 arch/sh/kernel/dumpstack.c 	unwind_stack(tsk, regs, sp, &print_trace_ops, "");
tsk               141 arch/sh/kernel/dumpstack.c 	if (!tsk)
tsk               142 arch/sh/kernel/dumpstack.c 		tsk = current;
tsk               144 arch/sh/kernel/dumpstack.c 	debug_show_held_locks(tsk);
tsk               147 arch/sh/kernel/dumpstack.c void show_stack(struct task_struct *tsk, unsigned long *sp)
tsk               151 arch/sh/kernel/dumpstack.c 	if (!tsk)
tsk               152 arch/sh/kernel/dumpstack.c 		tsk = current;
tsk               153 arch/sh/kernel/dumpstack.c 	if (tsk == current)
tsk               156 arch/sh/kernel/dumpstack.c 		sp = (unsigned long *)tsk->thread.sp;
tsk               160 arch/sh/kernel/dumpstack.c 		 (unsigned long)task_stack_page(tsk));
tsk               161 arch/sh/kernel/dumpstack.c 	show_trace(tsk, sp, NULL);
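A shape that recurs through the show_stack()/show_trace() hits above (and again in the sparc and unicore32 entries below): a NULL task argument means "the current task", and a NULL stack pointer is likewise defaulted from either the live stack or the task's saved sp. A standalone sketch of the convention, with current_task standing in for the kernel's current:

#include <stdio.h>

struct task { const char *comm; unsigned long saved_sp; };

static struct task current_task = { "init", 0 };

/* NULL tsk/sp mean "the caller's own task and live stack". */
static void show_stack(struct task *tsk, unsigned long *sp)
{
	unsigned long live;

	if (!tsk)
		tsk = &current_task;
	if (!sp)
		sp = (tsk == &current_task) ? &live : &tsk->saved_sp;

	printf("dumping stack of %s from %p\n", tsk->comm, (void *)sp);
}

int main(void)
{
	show_stack(NULL, NULL);
	return 0;
}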
tsk               262 arch/sh/kernel/hw_breakpoint.c void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
tsk               265 arch/sh/kernel/hw_breakpoint.c 	struct thread_struct *t = &tsk->thread;
tsk                42 arch/sh/kernel/process.c void free_thread_xstate(struct task_struct *tsk)
tsk                44 arch/sh/kernel/process.c 	if (tsk->thread.xstate) {
tsk                45 arch/sh/kernel/process.c 		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
tsk                46 arch/sh/kernel/process.c 		tsk->thread.xstate = NULL;
tsk                50 arch/sh/kernel/process.c void arch_release_task_struct(struct task_struct *tsk)
tsk                52 arch/sh/kernel/process.c 	free_thread_xstate(tsk);
tsk                80 arch/sh/kernel/process_32.c 	struct task_struct *tsk = current;
tsk                82 arch/sh/kernel/process_32.c 	flush_ptrace_hw_breakpoint(tsk);
tsk                86 arch/sh/kernel/process_32.c 	clear_fpu(tsk, task_pt_regs(tsk));
tsk               102 arch/sh/kernel/process_32.c 	struct task_struct *tsk = current;
tsk               104 arch/sh/kernel/process_32.c 	fpvalid = !!tsk_used_math(tsk);
tsk               106 arch/sh/kernel/process_32.c 		fpvalid = !fpregs_get(tsk, NULL, 0,
tsk               125 arch/sh/kernel/process_32.c 	struct task_struct *tsk = current;
tsk               127 arch/sh/kernel/process_32.c 	if (is_dsp_enabled(tsk)) {
tsk               132 arch/sh/kernel/process_32.c 		p->thread.dsp_status = tsk->thread.dsp_status;
tsk               278 arch/sh/kernel/process_64.c 		void show_stack(struct task_struct *tsk, unsigned long *sp);
tsk               280 arch/sh/kernel/process_64.c 		struct task_struct *tsk = get_current();
tsk               282 arch/sh/kernel/process_64.c 		tsk->thread.kregs = regs;
tsk               284 arch/sh/kernel/process_64.c 		show_stack(tsk, (unsigned long *)sp);
tsk               291 arch/sh/kernel/process_64.c void exit_thread(struct task_struct *tsk)
tsk               310 arch/sh/kernel/process_64.c 	if (last_task_used_math == tsk)
tsk               348 arch/sh/kernel/process_64.c 	struct task_struct *tsk = current;
tsk               350 arch/sh/kernel/process_64.c 	fpvalid = !!tsk_used_math(tsk);
tsk               354 arch/sh/kernel/process_64.c 			save_fpu(tsk);
tsk               360 arch/sh/kernel/process_64.c 		memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
tsk                77 arch/sh/kernel/ptrace_32.c static int set_single_step(struct task_struct *tsk, unsigned long addr)
tsk                79 arch/sh/kernel/ptrace_32.c 	struct thread_struct *thread = &tsk->thread;
tsk                92 arch/sh/kernel/ptrace_32.c 						 NULL, tsk);
tsk                80 arch/sh/kernel/signal_32.c 	struct task_struct *tsk = current;
tsk                86 arch/sh/kernel/signal_32.c 	return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0],
tsk                93 arch/sh/kernel/signal_32.c 	struct task_struct *tsk = current;
tsk               109 arch/sh/kernel/signal_32.c 	unlazy_fpu(tsk, regs);
tsk               110 arch/sh/kernel/signal_32.c 	return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu,
tsk               137 arch/sh/kernel/signal_32.c 		struct task_struct *tsk = current;
tsk               140 arch/sh/kernel/signal_32.c 		clear_fpu(tsk, regs);
tsk               214 arch/sh/kernel/smp.c int __cpu_up(unsigned int cpu, struct task_struct *tsk)
tsk               221 arch/sh/kernel/smp.c 	stack_start.sp = tsk->thread.sp;
tsk               222 arch/sh/kernel/smp.c 	stack_start.thread_info = tsk->stack;
tsk                80 arch/sh/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk                82 arch/sh/kernel/stacktrace.c 	unsigned long *sp = (unsigned long *)tsk->thread.sp;
tsk               552 arch/sh/math-emu/math.c 		struct task_struct *tsk = current;
tsk               554 arch/sh/math-emu/math.c 		if ((tsk->thread.xstate->softfpu.fpscr & (1 << 17))) {
tsk               556 arch/sh/math-emu/math.c 			denormal_to_double (&tsk->thread.xstate->softfpu,
tsk               558 arch/sh/math-emu/math.c 			tsk->thread.xstate->softfpu.fpscr &=
tsk               560 arch/sh/math-emu/math.c 			task_thread_info(tsk)->status |= TS_USEDFPU;
tsk               597 arch/sh/math-emu/math.c 	struct task_struct *tsk = current;
tsk               598 arch/sh/math-emu/math.c 	struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
tsk               602 arch/sh/math-emu/math.c 	if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
tsk               605 arch/sh/math-emu/math.c 		task_thread_info(tsk)->status |= TS_USEDFPU;
tsk                83 arch/sh/mm/alignment.c int get_unalign_ctl(struct task_struct *tsk, unsigned long addr)
tsk                85 arch/sh/mm/alignment.c 	return put_user(tsk->thread.flags & SH_THREAD_UAC_MASK,
tsk                89 arch/sh/mm/alignment.c int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
tsk                91 arch/sh/mm/alignment.c 	tsk->thread.flags = (tsk->thread.flags & ~SH_THREAD_UAC_MASK) |
tsk                96 arch/sh/mm/alignment.c void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn,
tsk               102 arch/sh/mm/alignment.c 			  tsk->comm, task_pid_nr(tsk),
tsk               107 arch/sh/mm/alignment.c 			  tsk->comm, task_pid_nr(tsk),
tsk               285 arch/sh/mm/fault.c 	struct task_struct *tsk = current;
tsk               286 arch/sh/mm/fault.c 	struct mm_struct *mm = tsk->mm;
tsk               379 arch/sh/mm/fault.c 	struct task_struct *tsk;
tsk               385 arch/sh/mm/fault.c 	tsk = current;
tsk               386 arch/sh/mm/fault.c 	mm = tsk->mm;
tsk               475 arch/sh/mm/fault.c 			tsk->maj_flt++;
tsk               479 arch/sh/mm/fault.c 			tsk->min_flt++;
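The fault.c hits above end with the usual page-fault accounting split: a fault that needed I/O increments tsk->maj_flt, anything cheaper increments tsk->min_flt. A toy sketch of that branch; the VM_FAULT_MAJOR value here is illustrative:

#include <stdio.h>

#define VM_FAULT_MAJOR	0x1	/* stand-in for the kernel's flag */

struct task { unsigned long maj_flt, min_flt; };

static void account_fault(struct task *tsk, unsigned int fault)
{
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;		/* had to read from disk */
	else
		tsk->min_flt++;		/* satisfied without I/O */
}

int main(void)
{
	struct task t = { 0, 0 };

	account_fault(&t, VM_FAULT_MAJOR);
	account_fault(&t, 0);
	printf("maj=%lu min=%lu\n", t.maj_flt, t.min_flt);
	return 0;
}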
tsk                 9 arch/sparc/include/asm/mmu_context_32.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk                16 arch/sparc/include/asm/mmu_context_32.h int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
tsk                27 arch/sparc/include/asm/mmu_context_32.h 	       struct task_struct *tsk);
tsk                29 arch/sparc/include/asm/mmu_context_32.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk                19 arch/sparc/include/asm/mmu_context_64.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk                29 arch/sparc/include/asm/mmu_context_64.h int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
tsk                81 arch/sparc/include/asm/mmu_context_64.h static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
tsk               139 arch/sparc/include/asm/mmu_context_64.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk                94 arch/sparc/include/asm/processor_32.h #define release_thread(tsk)		do { } while(0)
tsk                98 arch/sparc/include/asm/processor_32.h #define task_pt_regs(tsk) ((tsk)->thread.kregs)
tsk                99 arch/sparc/include/asm/processor_32.h #define KSTK_EIP(tsk)  ((tsk)->thread.kregs->pc)
tsk               100 arch/sparc/include/asm/processor_32.h #define KSTK_ESP(tsk)  ((tsk)->thread.kregs->u_regs[UREG_FP])
tsk                30 arch/sparc/include/asm/processor_64.h #define TASK_SIZE_OF(tsk) \
tsk                31 arch/sparc/include/asm/processor_64.h 	(test_tsk_thread_flag(tsk,TIF_32BIT) ? \
tsk               184 arch/sparc/include/asm/processor_64.h #define release_thread(tsk)		do { } while (0)
tsk               188 arch/sparc/include/asm/processor_64.h #define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
tsk               189 arch/sparc/include/asm/processor_64.h #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
tsk               190 arch/sparc/include/asm/processor_64.h #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
tsk                57 arch/sparc/include/asm/thread_info_32.h #define INIT_THREAD_INFO(tsk)				\
tsk                60 arch/sparc/include/asm/thread_info_32.h 	.task		=	&tsk,			\
tsk               116 arch/sparc/include/asm/thread_info_64.h #define INIT_THREAD_INFO(tsk)				\
tsk               118 arch/sparc/include/asm/thread_info_64.h 	.task		=	&tsk,			\
tsk                43 arch/sparc/include/asm/uprobes.h extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
tsk               151 arch/sparc/kernel/process_32.c void show_stack(struct task_struct *tsk, unsigned long *_ksp)
tsk               158 arch/sparc/kernel/process_32.c 	if (!tsk)
tsk               159 arch/sparc/kernel/process_32.c 		tsk = current;
tsk               161 arch/sparc/kernel/process_32.c 	if (tsk == current && !_ksp)
tsk               164 arch/sparc/kernel/process_32.c 	task_base = (unsigned long) task_stack_page(tsk);
tsk               183 arch/sparc/kernel/process_32.c void exit_thread(struct task_struct *tsk)
tsk               186 arch/sparc/kernel/process_32.c 	if (last_task_used_math == tsk) {
tsk               188 arch/sparc/kernel/process_32.c 	if (test_ti_thread_flag(task_thread_info(tsk), TIF_USEDFPU)) {
tsk               192 arch/sparc/kernel/process_32.c 		fpsave(&tsk->thread.float_regs[0], &tsk->thread.fsr,
tsk               193 arch/sparc/kernel/process_32.c 		       &tsk->thread.fpqueue[0], &tsk->thread.fpqdepth);
tsk               197 arch/sparc/kernel/process_32.c 		clear_ti_thread_flag(task_thread_info(tsk), TIF_USEDFPU);
tsk               411 arch/sparc/kernel/process_64.c void exit_thread(struct task_struct *tsk)
tsk               413 arch/sparc/kernel/process_64.c 	struct thread_info *t = task_thread_info(tsk);
tsk                83 arch/sparc/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk                85 arch/sparc/kernel/stacktrace.c 	struct thread_info *tp = task_thread_info(tsk);
tsk              2455 arch/sparc/kernel/traps_64.c void show_stack(struct task_struct *tsk, unsigned long *_ksp)
tsk              2465 arch/sparc/kernel/traps_64.c 	if (!tsk)
tsk              2466 arch/sparc/kernel/traps_64.c 		tsk = current;
tsk              2467 arch/sparc/kernel/traps_64.c 	tp = task_thread_info(tsk);
tsk              2469 arch/sparc/kernel/traps_64.c 		if (tsk == current)
tsk              2504 arch/sparc/kernel/traps_64.c 			ret_stack = ftrace_graph_get_ret_stack(tsk, graph);
tsk                40 arch/sparc/mm/fault_32.c 				       struct task_struct *tsk,
tsk                51 arch/sparc/mm/fault_32.c 		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
tsk                53 arch/sparc/mm/fault_32.c 		(tsk->mm ? (unsigned long) tsk->mm->pgd :
tsk                54 arch/sparc/mm/fault_32.c 			(unsigned long) tsk->active_mm->pgd));
tsk               108 arch/sparc/mm/fault_32.c 		unsigned long address, struct task_struct *tsk)
tsk               110 arch/sparc/mm/fault_32.c 	if (!unhandled_signal(tsk, sig))
tsk               117 arch/sparc/mm/fault_32.c 	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk               118 arch/sparc/mm/fault_32.c 	       tsk->comm, task_pid_nr(tsk), address,
tsk               164 arch/sparc/mm/fault_32.c 	struct task_struct *tsk = current;
tsk               165 arch/sparc/mm/fault_32.c 	struct mm_struct *mm = tsk->mm;
tsk               325 arch/sparc/mm/fault_32.c 	unhandled_fault(address, tsk, regs);
tsk               356 arch/sparc/mm/fault_32.c 		pgd = tsk->active_mm->pgd + offset;
tsk               381 arch/sparc/mm/fault_32.c 	struct task_struct *tsk = current;
tsk               382 arch/sparc/mm/fault_32.c 	struct mm_struct *mm = tsk->mm;
tsk               417 arch/sparc/mm/fault_32.c 	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
tsk               422 arch/sparc/mm/fault_32.c 	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
tsk                42 arch/sparc/mm/fault_64.c 				      struct task_struct *tsk,
tsk                53 arch/sparc/mm/fault_64.c 	       (tsk->mm ?
tsk                54 arch/sparc/mm/fault_64.c 		CTX_HWBITS(tsk->mm->context) :
tsk                55 arch/sparc/mm/fault_64.c 		CTX_HWBITS(tsk->active_mm->context)));
tsk                57 arch/sparc/mm/fault_64.c 	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
tsk                58 arch/sparc/mm/fault_64.c 		          (unsigned long) tsk->active_mm->pgd));
tsk               135 arch/sparc/mm/fault_64.c 		unsigned long address, struct task_struct *tsk)
tsk               137 arch/sparc/mm/fault_64.c 	if (!unhandled_signal(tsk, sig))
tsk               144 arch/sparc/mm/fault_64.c 	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk               145 arch/sparc/mm/fault_64.c 	       tsk->comm, task_pid_nr(tsk), address,
tsk               494 arch/sparc/mm/srmmu.c 	       struct task_struct *tsk)
tsk              1016 arch/sparc/mm/srmmu.c int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk               536 arch/sparc/mm/tsb.c int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
tsk                44 arch/um/include/asm/mmu_context.h #define deactivate_mm(tsk,mm)	do { } while (0)
tsk                61 arch/um/include/asm/mmu_context.h 			     struct task_struct *tsk)
tsk                74 arch/um/include/asm/mmu_context.h 				  struct task_struct *tsk)
tsk               100 arch/um/include/asm/processor-generic.h #define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
tsk                41 arch/um/include/asm/stacktrace.h void dump_trace(struct task_struct *tsk, const struct stacktrace_ops *ops, void *data);
tsk                33 arch/um/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                35 arch/um/include/asm/thread_info.h 	.task =		&tsk,			\
tsk                16 arch/um/kernel/stacktrace.c void dump_trace(struct task_struct *tsk,
tsk                22 arch/um/kernel/stacktrace.c 	struct pt_regs *segv_regs = tsk->thread.segv_regs;
tsk                25 arch/um/kernel/stacktrace.c 	bp = get_frame_pointer(tsk, segv_regs);
tsk                26 arch/um/kernel/stacktrace.c 	sp = get_stack_pointer(tsk, segv_regs);
tsk                60 arch/um/kernel/stacktrace.c static void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace)
tsk                62 arch/um/kernel/stacktrace.c 	dump_trace(tsk, &dump_ops, trace);
tsk                71 arch/um/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk                73 arch/um/kernel/stacktrace.c 	__save_stack_trace(tsk, trace);
tsk               144 arch/um/kernel/trap.c 	struct task_struct *tsk = current;
tsk               147 arch/um/kernel/trap.c 	if (!unhandled_signal(tsk, SIGSEGV))
tsk               154 arch/um/kernel/trap.c 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
tsk               155 arch/um/kernel/trap.c 		tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi),
tsk                21 arch/unicore32/include/asm/mmu_context.h #define init_new_context(tsk, mm)	0
tsk                35 arch/unicore32/include/asm/mmu_context.h enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk                47 arch/unicore32/include/asm/mmu_context.h 	  struct task_struct *tsk)
tsk                55 arch/unicore32/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)	do { } while (0)
tsk                69 arch/unicore32/include/asm/processor.h #define KSTK_EIP(tsk)	(task_pt_regs(tsk)->UCreg_pc)
tsk                70 arch/unicore32/include/asm/processor.h #define KSTK_ESP(tsk)	(task_pt_regs(tsk)->UCreg_sp)
tsk                79 arch/unicore32/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)						\
tsk                81 arch/unicore32/include/asm/thread_info.h 	.task		= &tsk,						\
tsk                98 arch/unicore32/include/asm/thread_info.h #define thread_saved_pc(tsk)	\
tsk                99 arch/unicore32/include/asm/thread_info.h 	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
tsk               100 arch/unicore32/include/asm/thread_info.h #define thread_saved_sp(tsk)	\
tsk               101 arch/unicore32/include/asm/thread_info.h 	((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
tsk               102 arch/unicore32/include/asm/thread_info.h #define thread_saved_fp(tsk)	\
tsk               103 arch/unicore32/include/asm/thread_info.h 	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
tsk               206 arch/unicore32/kernel/process.c 	struct task_struct *tsk = current;
tsk               209 arch/unicore32/kernel/process.c 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
tsk                61 arch/unicore32/kernel/ptrace.c static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
tsk                68 arch/unicore32/kernel/ptrace.c 		tmp = get_user_reg(tsk, off >> 2);
tsk                76 arch/unicore32/kernel/ptrace.c static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
tsk                82 arch/unicore32/kernel/ptrace.c 	return put_user_reg(tsk, off >> 2, val);
tsk                95 arch/unicore32/kernel/stacktrace.c void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk               103 arch/unicore32/kernel/stacktrace.c 	if (tsk != current) {
tsk               105 arch/unicore32/kernel/stacktrace.c 		frame.fp = thread_saved_fp(tsk);
tsk               106 arch/unicore32/kernel/stacktrace.c 		frame.sp = thread_saved_sp(tsk);
tsk               108 arch/unicore32/kernel/stacktrace.c 		frame.pc = thread_saved_pc(tsk);
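save_stack_trace_tsk() above shows how a task that is not running is unwound: its registers are not live, so the first frame is seeded from the fp/sp/pc the scheduler saved at the last context switch (the thread_saved_*() accessors indexed earlier). A compact sketch with stand-in types:

struct stackframe { unsigned long fp, sp, pc; };
struct thread     { unsigned long fp, sp, pc; };	/* saved at switch */
struct task       { struct thread thread; };

/* Seed the unwinder for a task that is not currently on a CPU. */
static struct stackframe seed_frame(const struct task *tsk)
{
	struct stackframe frame = {
		.fp = tsk->thread.fp,	/* thread_saved_fp(tsk) */
		.sp = tsk->thread.sp,	/* thread_saved_sp(tsk) */
		.pc = tsk->thread.pc,	/* thread_saved_pc(tsk) */
	};
	return frame;
}

int main(void)
{
	struct task t = { { 1, 2, 3 } };
	struct stackframe f = seed_frame(&t);

	return f.pc == 3 ? 0 : 1;
}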
tsk               138 arch/unicore32/kernel/traps.c static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
tsk               145 arch/unicore32/kernel/traps.c 	if (!tsk)
tsk               146 arch/unicore32/kernel/traps.c 		tsk = current;
tsk               151 arch/unicore32/kernel/traps.c 	} else if (tsk != current) {
tsk               152 arch/unicore32/kernel/traps.c 		fp = thread_saved_fp(tsk);
tsk               165 arch/unicore32/kernel/traps.c 	} else if (fp < (unsigned long)end_of_stack(tsk))
tsk               173 arch/unicore32/kernel/traps.c void show_stack(struct task_struct *tsk, unsigned long *sp)
tsk               175 arch/unicore32/kernel/traps.c 	dump_backtrace(NULL, tsk);
tsk               182 arch/unicore32/kernel/traps.c 	struct task_struct *tsk = thread->task;
tsk               190 arch/unicore32/kernel/traps.c 	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, \
tsk               198 arch/unicore32/kernel/traps.c 		TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
tsk               202 arch/unicore32/kernel/traps.c 			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
tsk               203 arch/unicore32/kernel/traps.c 		dump_backtrace(regs, tsk);
tsk               119 arch/unicore32/mm/fault.c 	struct task_struct *tsk = current;
tsk               121 arch/unicore32/mm/fault.c 	tsk->thread.address = addr;
tsk               122 arch/unicore32/mm/fault.c 	tsk->thread.error_code = fsr;
tsk               123 arch/unicore32/mm/fault.c 	tsk->thread.trap_no = 14;
tsk               129 arch/unicore32/mm/fault.c 	struct task_struct *tsk = current;
tsk               130 arch/unicore32/mm/fault.c 	struct mm_struct *mm = tsk->active_mm;
tsk               163 arch/unicore32/mm/fault.c 		unsigned int fsr, unsigned int flags, struct task_struct *tsk)
tsk               201 arch/unicore32/mm/fault.c 	struct task_struct *tsk;
tsk               207 arch/unicore32/mm/fault.c 	tsk = current;
tsk               208 arch/unicore32/mm/fault.c 	mm = tsk->mm;
tsk               247 arch/unicore32/mm/fault.c 	fault = __do_pf(mm, addr, fsr, flags, tsk);
tsk               258 arch/unicore32/mm/fault.c 			tsk->maj_flt++;
tsk               260 arch/unicore32/mm/fault.c 			tsk->min_flt++;
tsk               123 arch/x86/entry/vsyscall/vsyscall_64.c 	struct task_struct *tsk;
tsk               175 arch/x86/entry/vsyscall/vsyscall_64.c 	tsk = current;
tsk               276 arch/x86/entry/vsyscall/vsyscall_64.c 		if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
tsk               277 arch/x86/entry/vsyscall/vsyscall_64.c 				 !sigismember(&tsk->pending.signal, SIGSEGV)))
tsk                24 arch/x86/include/asm/fpu/signal.h 			      struct task_struct *tsk);
tsk               180 arch/x86/include/asm/mmu_context.h void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
tsk               186 arch/x86/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *tsk,
tsk               211 arch/x86/include/asm/mmu_context.h 		      struct task_struct *tsk);
tsk               214 arch/x86/include/asm/mmu_context.h 			       struct task_struct *tsk);
tsk               224 arch/x86/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)			\
tsk               229 arch/x86/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)			\
tsk                 9 arch/x86/include/asm/pkeys.h extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
tsk                41 arch/x86/include/asm/pkeys.h extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
tsk               118 arch/x86/include/asm/pkeys.h extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
tsk               120 arch/x86/include/asm/pkeys.h extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
tsk               377 arch/x86/include/asm/segment.h #  define task_user_gs(tsk)		((tsk)->thread.gs)
tsk               383 arch/x86/include/asm/segment.h #  define task_user_gs(tsk)		(task_pt_regs(tsk)->gs)
tsk                61 arch/x86/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk               247 arch/x86/include/asm/thread_info.h extern void arch_release_task_struct(struct task_struct *tsk);
tsk              1199 arch/x86/kernel/cpu/bugs.c static void task_update_spec_tif(struct task_struct *tsk)
tsk              1202 arch/x86/kernel/cpu/bugs.c 	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
tsk              1212 arch/x86/kernel/cpu/bugs.c 	if (tsk == current)
tsk               543 arch/x86/kernel/cpu/resctrl/rdtgroup.c static int __rdtgroup_move_task(struct task_struct *tsk,
tsk               560 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	ret = task_work_add(tsk, &callback->work, true);
tsk               577 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			tsk->closid = rdtgrp->closid;
tsk               578 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			tsk->rmid = rdtgrp->mon.rmid;
tsk               580 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			if (rdtgrp->mon.parent->closid == tsk->closid) {
tsk               581 arch/x86/kernel/cpu/resctrl/rdtgroup.c 				tsk->rmid = rdtgrp->mon.rmid;
tsk               642 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	struct task_struct *tsk;
tsk               647 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		tsk = find_task_by_vpid(pid);
tsk               648 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		if (!tsk) {
tsk               654 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		tsk = current;
tsk               657 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	get_task_struct(tsk);
tsk               660 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	ret = rdtgroup_task_write_permission(tsk, of);
tsk               662 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		ret = __rdtgroup_move_task(tsk, rdtgrp);
tsk               664 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	put_task_struct(tsk);
tsk               232 arch/x86/kernel/fpu/regset.c convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
tsk               234 arch/x86/kernel/fpu/regset.c 	struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
tsk               250 arch/x86/kernel/fpu/regset.c 	env->fcs = task_pt_regs(tsk)->cs;
tsk               251 arch/x86/kernel/fpu/regset.c 	if (tsk == current) {
tsk               254 arch/x86/kernel/fpu/regset.c 		env->fos = tsk->thread.ds;
tsk               368 arch/x86/kernel/fpu/regset.c 	struct task_struct *tsk = current;
tsk               370 arch/x86/kernel/fpu/regset.c 	return !fpregs_get(tsk, NULL, 0, sizeof(struct user_i387_ia32_struct),
tsk                58 arch/x86/kernel/fpu/signal.c static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
tsk                61 arch/x86/kernel/fpu/signal.c 		struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
tsk                67 arch/x86/kernel/fpu/signal.c 			copy_fxregs_to_kernel(&tsk->thread.fpu);
tsk                70 arch/x86/kernel/fpu/signal.c 		convert_from_fxsr(&env, tsk);
tsk               166 arch/x86/kernel/fpu/signal.c 	struct task_struct *tsk = current;
tsk               204 arch/x86/kernel/fpu/signal.c 	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
tsk               277 arch/x86/kernel/fpu/signal.c 	struct task_struct *tsk = current;
tsk               278 arch/x86/kernel/fpu/signal.c 	struct fpu *fpu = &tsk->thread.fpu;
tsk               904 arch/x86/kernel/fpu/xstate.c int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
tsk               397 arch/x86/kernel/hw_breakpoint.c void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
tsk               400 arch/x86/kernel/hw_breakpoint.c 	struct thread_struct *t = &tsk->thread;
tsk               629 arch/x86/kernel/kgdb.c 	struct task_struct *tsk = current;
tsk               634 arch/x86/kernel/kgdb.c 			tsk->thread.debugreg6 |= (DR_TRAP0 << i);
tsk               110 arch/x86/kernel/process.c void exit_thread(struct task_struct *tsk)
tsk               112 arch/x86/kernel/process.c 	struct thread_struct *t = &tsk->thread;
tsk               137 arch/x86/kernel/process.c 	struct task_struct *tsk = current;
tsk               139 arch/x86/kernel/process.c 	flush_ptrace_hw_breakpoint(tsk);
tsk               140 arch/x86/kernel/process.c 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
tsk               142 arch/x86/kernel/process.c 	fpu__clear(&tsk->thread.fpu);
tsk               455 arch/x86/kernel/process.c static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
tsk               457 arch/x86/kernel/process.c 	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
tsk               458 arch/x86/kernel/process.c 		if (task_spec_ssb_disable(tsk))
tsk               459 arch/x86/kernel/process.c 			set_tsk_thread_flag(tsk, TIF_SSBD);
tsk               461 arch/x86/kernel/process.c 			clear_tsk_thread_flag(tsk, TIF_SSBD);
tsk               463 arch/x86/kernel/process.c 		if (task_spec_ib_disable(tsk))
tsk               464 arch/x86/kernel/process.c 			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
tsk               466 arch/x86/kernel/process.c 			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
tsk               469 arch/x86/kernel/process.c 	return task_thread_info(tsk)->flags;
tsk               121 arch/x86/kernel/process_32.c 	struct task_struct *tsk;
tsk               154 arch/x86/kernel/process_32.c 	tsk = current;
tsk               157 arch/x86/kernel/process_32.c 	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
tsk               158 arch/x86/kernel/process_32.c 		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
tsk               531 arch/x86/kernel/ptrace.c ptrace_register_breakpoint(struct task_struct *tsk, int len, int type,
tsk               545 arch/x86/kernel/ptrace.c 						 NULL, tsk);
tsk               564 arch/x86/kernel/ptrace.c static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
tsk               566 arch/x86/kernel/ptrace.c 	struct thread_struct *thread = &tsk->thread;
tsk               585 arch/x86/kernel/ptrace.c 			bp = ptrace_register_breakpoint(tsk,
tsk               615 arch/x86/kernel/ptrace.c static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
tsk               617 arch/x86/kernel/ptrace.c 	struct thread_struct *thread = &tsk->thread;
tsk               634 arch/x86/kernel/ptrace.c static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
tsk               637 arch/x86/kernel/ptrace.c 	struct thread_struct *t = &tsk->thread;
tsk               653 arch/x86/kernel/ptrace.c 		bp = ptrace_register_breakpoint(tsk,
tsk               673 arch/x86/kernel/ptrace.c static int ptrace_set_debugreg(struct task_struct *tsk, int n,
tsk               676 arch/x86/kernel/ptrace.c 	struct thread_struct *thread = &tsk->thread;
tsk               681 arch/x86/kernel/ptrace.c 		rc = ptrace_set_breakpoint_addr(tsk, n, val);
tsk               686 arch/x86/kernel/ptrace.c 		rc = ptrace_write_dr7(tsk, val);
tsk              1337 arch/x86/kernel/ptrace.c 	struct task_struct *tsk = current;
tsk              1339 arch/x86/kernel/ptrace.c 	tsk->thread.trap_nr = X86_TRAP_DB;
tsk              1340 arch/x86/kernel/ptrace.c 	tsk->thread.error_code = error_code;
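ptrace_set_debugreg() above splits the x86 debug registers by role: DR0-DR3 carry breakpoint addresses (routed to ptrace_set_breakpoint_addr()), DR6 is status, and DR7 is the control word (routed to ptrace_write_dr7()). A simplified sketch of that dispatch; the reserved-register handling is condensed, not the exact kernel logic:

#include <errno.h>

#define HBP_NUM	4	/* DR0..DR3 */

struct thread { unsigned long bp_addr[HBP_NUM], dr6, dr7; };

static int set_debugreg(struct thread *t, int n, unsigned long val)
{
	if (n >= 0 && n < HBP_NUM) {
		t->bp_addr[n] = val;	/* breakpoint address */
		return 0;
	}
	if (n == 6) {
		t->dr6 = val;		/* status bits */
		return 0;
	}
	if (n == 7) {
		t->dr7 = val;		/* enables + length/type fields */
		return 0;
	}
	return -EIO;			/* DR4/DR5 are reserved */
}

int main(void)
{
	struct thread t = { { 0 }, 0, 0 };

	return set_debugreg(&t, 0, 0x1000) ? 1 : 0;
}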
tsk               193 arch/x86/kernel/traps.c do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
tsk               210 arch/x86/kernel/traps.c 		tsk->thread.error_code = error_code;
tsk               211 arch/x86/kernel/traps.c 		tsk->thread.trap_nr = trapnr;
tsk               224 arch/x86/kernel/traps.c 	tsk->thread.error_code = error_code;
tsk               225 arch/x86/kernel/traps.c 	tsk->thread.trap_nr = trapnr;
tsk               230 arch/x86/kernel/traps.c static void show_signal(struct task_struct *tsk, int signr,
tsk               234 arch/x86/kernel/traps.c 	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
tsk               237 arch/x86/kernel/traps.c 			tsk->comm, task_pid_nr(tsk), type, desc,
tsk               248 arch/x86/kernel/traps.c 	struct task_struct *tsk = current;
tsk               251 arch/x86/kernel/traps.c 	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
tsk               254 arch/x86/kernel/traps.c 	show_signal(tsk, signr, "trap ", str, regs, error_code);
tsk               319 arch/x86/kernel/traps.c 	struct task_struct *tsk = current;
tsk               374 arch/x86/kernel/traps.c 	tsk->thread.error_code = error_code;
tsk               375 arch/x86/kernel/traps.c 	tsk->thread.trap_nr = X86_TRAP_DF;
tsk               415 arch/x86/kernel/traps.c 	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
tsk               472 arch/x86/kernel/traps.c 		struct task_struct *tsk = current;
tsk               489 arch/x86/kernel/traps.c 		if (!do_trap_no_signal(tsk, X86_TRAP_BR, "bounds", regs,
tsk               493 arch/x86/kernel/traps.c 		show_signal(tsk, SIGSEGV, "trap ", "bounds", regs, error_code);
tsk               521 arch/x86/kernel/traps.c 	struct task_struct *tsk;
tsk               537 arch/x86/kernel/traps.c 	tsk = current;
tsk               542 arch/x86/kernel/traps.c 		tsk->thread.error_code = error_code;
tsk               543 arch/x86/kernel/traps.c 		tsk->thread.trap_nr = X86_TRAP_GP;
tsk               560 arch/x86/kernel/traps.c 	tsk->thread.error_code = error_code;
tsk               561 arch/x86/kernel/traps.c 	tsk->thread.trap_nr = X86_TRAP_GP;
tsk               563 arch/x86/kernel/traps.c 	show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
tsk               711 arch/x86/kernel/traps.c 	struct task_struct *tsk = current;
tsk               740 arch/x86/kernel/traps.c 	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
tsk               763 arch/x86/kernel/traps.c 	tsk->thread.debugreg6 = dr6;
tsk               798 arch/x86/kernel/traps.c 		tsk->thread.debugreg6 &= ~DR_STEP;
tsk               799 arch/x86/kernel/traps.c 		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
tsk               802 arch/x86/kernel/traps.c 	si_code = get_si_code(tsk->thread.debugreg6);
tsk               803 arch/x86/kernel/traps.c 	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
tsk               120 arch/x86/kernel/umip.c 	struct task_struct *tsk = current;
tsk               130 arch/x86/kernel/umip.c 	printk("%s" pr_fmt("%s[%d] ip:%lx sp:%lx: %pV"), log_level, tsk->comm,
tsk               131 arch/x86/kernel/umip.c 	       task_pid_nr(tsk), regs->ip, regs->sp, &vaf);
tsk               285 arch/x86/kernel/umip.c 	struct task_struct *tsk = current;
tsk               287 arch/x86/kernel/umip.c 	tsk->thread.cr2		= (unsigned long)addr;
tsk               288 arch/x86/kernel/umip.c 	tsk->thread.error_code	= X86_PF_USER | X86_PF_WRITE;
tsk               289 arch/x86/kernel/umip.c 	tsk->thread.trap_nr	= X86_TRAP_PF;
tsk               293 arch/x86/kernel/umip.c 	if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV)))
tsk                98 arch/x86/kernel/vm86_32.c 	struct task_struct *tsk = current;
tsk               150 arch/x86/kernel/vm86_32.c 	tsk->thread.sp0 = vm86->saved_sp0;
tsk               151 arch/x86/kernel/vm86_32.c 	tsk->thread.sysenter_cs = __KERNEL_CS;
tsk               152 arch/x86/kernel/vm86_32.c 	update_task_stack(tsk);
tsk               153 arch/x86/kernel/vm86_32.c 	refresh_sysenter_cs(&tsk->thread);
tsk               241 arch/x86/kernel/vm86_32.c 	struct task_struct *tsk = current;
tsk               242 arch/x86/kernel/vm86_32.c 	struct vm86 *vm86 = tsk->thread.vm86;
tsk               276 arch/x86/kernel/vm86_32.c 		tsk->thread.vm86 = vm86;
tsk               365 arch/x86/kernel/vm86_32.c 	vm86->saved_sp0 = tsk->thread.sp0;
tsk               370 arch/x86/kernel/vm86_32.c 	tsk->thread.sp0 += 16;
tsk               373 arch/x86/kernel/vm86_32.c 		tsk->thread.sysenter_cs = 0;
tsk               374 arch/x86/kernel/vm86_32.c 		refresh_sysenter_cs(&tsk->thread);
tsk               377 arch/x86/kernel/vm86_32.c 	update_task_stack(tsk);
tsk               381 arch/x86/kernel/vm86_32.c 		mark_screen_rdonly(tsk->mm);
tsk               764 arch/x86/kernel/vm86_32.c 	struct task_struct *tsk;
tsk               782 arch/x86/kernel/vm86_32.c 	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
tsk               786 arch/x86/kernel/vm86_32.c 		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
tsk               805 arch/x86/kernel/vm86_32.c 	vm86_irqs[irqnumber].tsk = NULL;
tsk               816 arch/x86/kernel/vm86_32.c 	    if (vm86_irqs[i].tsk == task)
tsk               827 arch/x86/kernel/vm86_32.c 	if (vm86_irqs[irqnumber].tsk != current) return 0;
tsk               857 arch/x86/kernel/vm86_32.c 			if (vm86_irqs[irq].tsk) return -EPERM;
tsk               861 arch/x86/kernel/vm86_32.c 			vm86_irqs[irq].tsk = current;
tsk               866 arch/x86/kernel/vm86_32.c 			if (!vm86_irqs[irqnumber].tsk) return 0;
tsk               867 arch/x86/kernel/vm86_32.c 			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
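The vm86_32.c hits above implement a small ownership table: claiming an IRQ records the claiming task in vm86_irqs[irq].tsk, and later queries or frees succeed only for that owner. A minimal sketch of the ownership check; the bounds and error codes follow the pattern rather than the exact kernel source:

#include <errno.h>
#include <stddef.h>

struct task { int pid; };

static struct { struct task *tsk; int sig; } vm86_irqs[16];

static int release_irq(int irq, struct task *caller)
{
	if (irq < 3 || irq > 15)
		return -EPERM;		/* outside the usable range */
	if (!vm86_irqs[irq].tsk)
		return 0;		/* nothing claimed */
	if (vm86_irqs[irq].tsk != caller)
		return -EPERM;		/* only the owner may free */
	vm86_irqs[irq].tsk = NULL;
	return 0;
}

int main(void)
{
	struct task a = { 1 }, b = { 2 };

	vm86_irqs[5].tsk = &a;
	return release_irq(5, &b) == -EPERM ? 0 : 1;	/* b is not the owner */
}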
tsk              3369 arch/x86/kvm/mmu.c static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
tsk              3371 arch/x86/kvm/mmu.c 	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
tsk               272 arch/x86/mm/fault.c 		 struct task_struct *tsk)
tsk               277 arch/x86/mm/fault.c 	if (!v8086_mode(regs) || !tsk->thread.vm86)
tsk               282 arch/x86/mm/fault.c 		tsk->thread.vm86->screen_bitmap |= 1 << bit;
tsk               435 arch/x86/mm/fault.c 		 struct task_struct *tsk)
tsk               688 arch/x86/mm/fault.c 	struct task_struct *tsk;
tsk               693 arch/x86/mm/fault.c 	tsk = current;
tsk               697 arch/x86/mm/fault.c 	       tsk->comm, address);
tsk               709 arch/x86/mm/fault.c 	struct task_struct *tsk = current;
tsk               723 arch/x86/mm/fault.c 	tsk->thread.trap_nr = X86_TRAP_PF;
tsk               724 arch/x86/mm/fault.c 	tsk->thread.error_code = error_code | X86_PF_USER;
tsk               725 arch/x86/mm/fault.c 	tsk->thread.cr2 = address;
tsk               732 arch/x86/mm/fault.c 	struct task_struct *tsk = current;
tsk               781 arch/x86/mm/fault.c 	    (((unsigned long)tsk->stack - 1 - address < PAGE_SIZE) ||
tsk               782 arch/x86/mm/fault.c 	     address - ((unsigned long)tsk->stack + THREAD_SIZE) < PAGE_SIZE)) {
tsk               838 arch/x86/mm/fault.c 	if (task_stack_end_corrupted(tsk))
tsk               857 arch/x86/mm/fault.c 		unsigned long address, struct task_struct *tsk)
tsk               859 arch/x86/mm/fault.c 	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
tsk               861 arch/x86/mm/fault.c 	if (!unhandled_signal(tsk, SIGSEGV))
tsk               868 arch/x86/mm/fault.c 		loglvl, tsk->comm, task_pid_nr(tsk), address,
tsk               891 arch/x86/mm/fault.c 	struct task_struct *tsk = current;
tsk               919 arch/x86/mm/fault.c 			show_signal_msg(regs, error_code, address, tsk);
tsk              1037 arch/x86/mm/fault.c 		struct task_struct *tsk = current;
tsk              1042 arch/x86/mm/fault.c 			tsk->comm, tsk->pid, address);
tsk              1309 arch/x86/mm/fault.c 	struct task_struct *tsk;
tsk              1314 arch/x86/mm/fault.c 	tsk = current;
tsk              1315 arch/x86/mm/fault.c 	mm = tsk->mm;
tsk              1476 arch/x86/mm/fault.c 			if (!fatal_signal_pending(tsk))
tsk              1500 arch/x86/mm/fault.c 		tsk->maj_flt++;
tsk              1503 arch/x86/mm/fault.c 		tsk->min_flt++;
tsk              1507 arch/x86/mm/fault.c 	check_v8086_mode(regs, address, tsk);
tsk               155 arch/x86/mm/tlb.c 	       struct task_struct *tsk)
tsk               160 arch/x86/mm/tlb.c 	switch_mm_irqs_off(prev, next, tsk);
tsk               276 arch/x86/mm/tlb.c 			struct task_struct *tsk)
tsk               378 arch/x86/mm/tlb.c 		cond_ibpb(tsk);
tsk               461 arch/x86/mm/tlb.c void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk                14 arch/x86/um/asm/processor.h #define KSTK_EIP(tsk) KSTK_REG(tsk, HOST_IP)
tsk                15 arch/x86/um/asm/processor.h #define KSTK_ESP(tsk) KSTK_REG(tsk, HOST_SP)
tsk                16 arch/x86/um/asm/processor.h #define KSTK_EBP(tsk) KSTK_REG(tsk, HOST_BP)
tsk                50 arch/xtensa/include/asm/hw_breakpoint.h void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
tsk                56 arch/xtensa/include/asm/hw_breakpoint.h static inline void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
tsk               114 arch/xtensa/include/asm/mmu_context.h static inline int init_new_context(struct task_struct *tsk,
tsk               126 arch/xtensa/include/asm/mmu_context.h 			     struct task_struct *tsk)
tsk               140 arch/xtensa/include/asm/mmu_context.h #define deactivate_mm(tsk, mm)	do { } while (0)
tsk               152 arch/xtensa/include/asm/mmu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk                10 arch/xtensa/include/asm/nommu_context.h static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
tsk                14 arch/xtensa/include/asm/nommu_context.h static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
tsk                28 arch/xtensa/include/asm/nommu_context.h 				struct task_struct *tsk)
tsk                32 arch/xtensa/include/asm/nommu_context.h static inline void deactivate_mm(struct task_struct *tsk, struct mm_struct *mm)
tsk               219 arch/xtensa/include/asm/processor.h #define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
tsk               220 arch/xtensa/include/asm/processor.h #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->areg[1])
tsk                86 arch/xtensa/include/asm/ptrace.h # define task_pt_regs(tsk) ((struct pt_regs*) \
tsk                87 arch/xtensa/include/asm/ptrace.h 	(task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
tsk                74 arch/xtensa/include/asm/thread_info.h #define INIT_THREAD_INFO(tsk)			\
tsk                76 arch/xtensa/include/asm/thread_info.h 	.task		= &tsk,			\
tsk               233 arch/xtensa/kernel/hw_breakpoint.c void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
tsk               236 arch/xtensa/kernel/hw_breakpoint.c 	struct thread_struct *t = &tsk->thread;
tsk               257 arch/xtensa/kernel/hw_breakpoint.c void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
tsk               259 arch/xtensa/kernel/hw_breakpoint.c 	memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
tsk               260 arch/xtensa/kernel/hw_breakpoint.c 	memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
tsk               129 arch/xtensa/kernel/process.c void exit_thread(struct task_struct *tsk)
tsk               132 arch/xtensa/kernel/process.c 	coprocessor_release_all(task_thread_info(tsk));
tsk               387 arch/xtensa/kernel/ptrace.c static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
tsk               400 arch/xtensa/kernel/ptrace.c 					   tsk);
tsk              4970 block/bfq-iosched.c 	struct task_struct *tsk = current;
tsk              4987 block/bfq-iosched.c 		bfqq->new_ioprio = task_nice_ioprio(tsk);
tsk              4988 block/bfq-iosched.c 		bfqq->new_ioprio_class = task_nice_ioclass(tsk);
tsk              1320 block/blk-cgroup.c static void blkcg_exit(struct task_struct *tsk)
tsk              1322 block/blk-cgroup.c 	if (tsk->throttle_queue)
tsk              1323 block/blk-cgroup.c 		blk_put_queue(tsk->throttle_queue);
tsk              1324 block/blk-cgroup.c 	tsk->throttle_queue = NULL;
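blkcg_exit() above is the standard release-and-clear shape for a per-task reference at exit time: drop the reference if one is held, then NULL the pointer so a second pass cannot double-put. A sketch with put_queue() standing in for blk_put_queue():

#include <stdlib.h>

struct queue { int refcnt; };
struct task  { struct queue *throttle_queue; };

static void put_queue(struct queue *q)
{
	if (--q->refcnt == 0)
		free(q);
}

/* Safe to call more than once thanks to the NULL-out. */
static void blkcg_exit(struct task *tsk)
{
	if (tsk->throttle_queue)
		put_queue(tsk->throttle_queue);
	tsk->throttle_queue = NULL;
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));
	struct task t = { q };

	if (!q)
		return 1;
	q->refcnt = 1;
	blkcg_exit(&t);		/* drops the only reference */
	blkcg_exit(&t);		/* harmless second call */
	return 0;
}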
tsk              1698 block/blk-core.c 	struct task_struct *tsk = current;
tsk              1703 block/blk-core.c 	if (tsk->plug)
tsk              1715 block/blk-core.c 	tsk->plug = plug;
tsk               471 drivers/android/binder.c 	struct task_struct *tsk;
tsk              2351 drivers/android/binder.c 				proc->tsk == current->group_leader);
tsk              2368 drivers/android/binder.c 			if (proc->tsk != current->group_leader) {
tsk              2460 drivers/android/binder.c 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
tsk              2506 drivers/android/binder.c 	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
tsk              2594 drivers/android/binder.c 	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
tsk              2987 drivers/android/binder.c 		if (security_binder_transaction(proc->tsk,
tsk              2988 drivers/android/binder.c 						target_proc->tsk) < 0) {
tsk              3103 drivers/android/binder.c 	t->sender_euid = task_euid(proc->tsk);
tsk              3114 drivers/android/binder.c 		security_task_getsecid(proc->tsk, &secid);
tsk              4455 drivers/android/binder.c 			struct task_struct *sender = t_from->proc->tsk;
tsk              4694 drivers/android/binder.c 	put_task_struct(proc->tsk);
tsk              4905 drivers/android/binder.c 	ret = security_binder_set_context_mgr(proc->tsk);
tsk              5173 drivers/android/binder.c 	if (proc->tsk != current->group_leader)
tsk              5221 drivers/android/binder.c 	proc->tsk = current->group_leader;
tsk               498 drivers/base/power/main.c 	struct task_struct	*tsk;
tsk               518 drivers/base/power/main.c 	show_stack(wd->tsk, NULL);
tsk               533 drivers/base/power/main.c 	wd->tsk = current;
tsk                60 drivers/dma/bestcomm/ata.c 	struct bcom_task *tsk;
tsk                67 drivers/dma/bestcomm/ata.c 	tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0);
tsk                68 drivers/dma/bestcomm/ata.c 	if (!tsk)
tsk                71 drivers/dma/bestcomm/ata.c 	tsk->flags = BCOM_FLAGS_NONE;
tsk                73 drivers/dma/bestcomm/ata.c 	bcom_ata_reset_bd(tsk);
tsk                75 drivers/dma/bestcomm/ata.c 	var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
tsk                76 drivers/dma/bestcomm/ata.c 	inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
tsk                78 drivers/dma/bestcomm/ata.c 	if (bcom_load_image(tsk->tasknum, bcom_ata_task)) {
tsk                79 drivers/dma/bestcomm/ata.c 		bcom_task_free(tsk);
tsk                84 drivers/dma/bestcomm/ata.c 				offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
tsk                85 drivers/dma/bestcomm/ata.c 	var->bd_base	= tsk->bd_pa;
tsk                86 drivers/dma/bestcomm/ata.c 	var->bd_last	= tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
tsk                87 drivers/dma/bestcomm/ata.c 	var->bd_start	= tsk->bd_pa;
tsk                91 drivers/dma/bestcomm/ata.c 	bcom_set_task_pragma(tsk->tasknum, BCOM_ATA_PRAGMA);
tsk                92 drivers/dma/bestcomm/ata.c 	bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
tsk                97 drivers/dma/bestcomm/ata.c 	out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum); /* Clear ints */
tsk                99 drivers/dma/bestcomm/ata.c 	return tsk;
tsk               103 drivers/dma/bestcomm/ata.c void bcom_ata_rx_prepare(struct bcom_task *tsk)
tsk               107 drivers/dma/bestcomm/ata.c 	inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
tsk               113 drivers/dma/bestcomm/ata.c 	bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_RX);
tsk               117 drivers/dma/bestcomm/ata.c void bcom_ata_tx_prepare(struct bcom_task *tsk)
tsk               121 drivers/dma/bestcomm/ata.c 	inc = (struct bcom_ata_inc *) bcom_task_inc(tsk->tasknum);
tsk               127 drivers/dma/bestcomm/ata.c 	bcom_set_initiator(tsk->tasknum, BCOM_INITIATOR_ATA_TX);
tsk               131 drivers/dma/bestcomm/ata.c void bcom_ata_reset_bd(struct bcom_task *tsk)
tsk               136 drivers/dma/bestcomm/ata.c 	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
tsk               138 drivers/dma/bestcomm/ata.c 	tsk->index = 0;
tsk               139 drivers/dma/bestcomm/ata.c 	tsk->outdex = 0;
tsk               141 drivers/dma/bestcomm/ata.c 	var = (struct bcom_ata_var *) bcom_task_var(tsk->tasknum);
tsk               146 drivers/dma/bestcomm/ata.c void bcom_ata_release(struct bcom_task *tsk)
tsk               149 drivers/dma/bestcomm/ata.c 	bcom_task_free(tsk);
tsk                53 drivers/dma/bestcomm/bestcomm.c 	struct bcom_task *tsk;
tsk                75 drivers/dma/bestcomm/bestcomm.c 	tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL);
tsk                76 drivers/dma/bestcomm/bestcomm.c 	if (!tsk)
tsk                79 drivers/dma/bestcomm/bestcomm.c 	tsk->tasknum = tasknum;
tsk                81 drivers/dma/bestcomm/bestcomm.c 		tsk->priv = (void*)tsk + sizeof(struct bcom_task);
tsk                84 drivers/dma/bestcomm/bestcomm.c 	tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
tsk                85 drivers/dma/bestcomm/bestcomm.c 	if (!tsk->irq)
tsk                90 drivers/dma/bestcomm/bestcomm.c 		tsk->cookie = kmalloc_array(bd_count, sizeof(void *),
tsk                92 drivers/dma/bestcomm/bestcomm.c 		if (!tsk->cookie)
tsk                95 drivers/dma/bestcomm/bestcomm.c 		tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
tsk                96 drivers/dma/bestcomm/bestcomm.c 		if (!tsk->bd)
tsk                98 drivers/dma/bestcomm/bestcomm.c 		memset(tsk->bd, 0x00, bd_count * bd_size);
tsk               100 drivers/dma/bestcomm/bestcomm.c 		tsk->num_bd = bd_count;
tsk               101 drivers/dma/bestcomm/bestcomm.c 		tsk->bd_size = bd_size;
tsk               104 drivers/dma/bestcomm/bestcomm.c 	return tsk;
tsk               107 drivers/dma/bestcomm/bestcomm.c 	if (tsk) {
tsk               108 drivers/dma/bestcomm/bestcomm.c 		if (tsk->irq)
tsk               109 drivers/dma/bestcomm/bestcomm.c 			irq_dispose_mapping(tsk->irq);
tsk               110 drivers/dma/bestcomm/bestcomm.c 		bcom_sram_free(tsk->bd);
tsk               111 drivers/dma/bestcomm/bestcomm.c 		kfree(tsk->cookie);
tsk               112 drivers/dma/bestcomm/bestcomm.c 		kfree(tsk);
tsk               122 drivers/dma/bestcomm/bestcomm.c bcom_task_free(struct bcom_task *tsk)
tsk               125 drivers/dma/bestcomm/bestcomm.c 	bcom_disable_task(tsk->tasknum);
tsk               128 drivers/dma/bestcomm/bestcomm.c 	bcom_eng->tdt[tsk->tasknum].start = 0;
tsk               129 drivers/dma/bestcomm/bestcomm.c 	bcom_eng->tdt[tsk->tasknum].stop  = 0;
tsk               132 drivers/dma/bestcomm/bestcomm.c 	irq_dispose_mapping(tsk->irq);
tsk               133 drivers/dma/bestcomm/bestcomm.c 	bcom_sram_free(tsk->bd);
tsk               134 drivers/dma/bestcomm/bestcomm.c 	kfree(tsk->cookie);
tsk               135 drivers/dma/bestcomm/bestcomm.c 	kfree(tsk);
tsk               237 drivers/dma/bestcomm/bestcomm.c bcom_enable(struct bcom_task *tsk)
tsk               239 drivers/dma/bestcomm/bestcomm.c 	bcom_enable_task(tsk->tasknum);
tsk               244 drivers/dma/bestcomm/bestcomm.c bcom_disable(struct bcom_task *tsk)
tsk               246 drivers/dma/bestcomm/bestcomm.c 	bcom_disable_task(tsk->tasknum);
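
A minimal sketch (not from the tree) of the task lifecycle implied by the bestcomm.c entries above: allocate, pick up the IRQ that bcom_task_alloc() already mapped, enable, and free on teardown. bcom_task_alloc() takes (bd_count, bd_size, priv_size) per the allocation path indexed above; my_dma_init/my_dma_exit are hypothetical driver hooks.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/fsl/bestcomm/bestcomm.h>

static struct bcom_task *my_task;

static int my_dma_init(void)
{
	/* 64 descriptors, generic bcom_bd, no per-type private area */
	my_task = bcom_task_alloc(64, sizeof(struct bcom_bd), 0);
	if (!my_task)
		return -ENOMEM;

	/* bcom_task_alloc() already did irq_of_parse_and_map() */
	pr_info("bestcomm task %d, irq %d\n",
		my_task->tasknum, bcom_get_task_irq(my_task));

	/* a type-specific *_reset() would load the microcode here */
	bcom_enable(my_task);
	return 0;
}

static void my_dma_exit(void)
{
	/* bcom_task_free() disables the task and disposes the IRQ mapping */
	bcom_task_free(my_task);
}
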
tsk                87 drivers/dma/bestcomm/fec.c 	struct bcom_task *tsk;
tsk                90 drivers/dma/bestcomm/fec.c 	tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
tsk                92 drivers/dma/bestcomm/fec.c 	if (!tsk)
tsk                95 drivers/dma/bestcomm/fec.c 	tsk->flags = BCOM_FLAGS_NONE;
tsk                97 drivers/dma/bestcomm/fec.c 	priv = tsk->priv;
tsk               101 drivers/dma/bestcomm/fec.c 	if (bcom_fec_rx_reset(tsk)) {
tsk               102 drivers/dma/bestcomm/fec.c 		bcom_task_free(tsk);
tsk               106 drivers/dma/bestcomm/fec.c 	return tsk;
tsk               111 drivers/dma/bestcomm/fec.c bcom_fec_rx_reset(struct bcom_task *tsk)
tsk               113 drivers/dma/bestcomm/fec.c 	struct bcom_fec_priv *priv = tsk->priv;
tsk               118 drivers/dma/bestcomm/fec.c 	bcom_disable_task(tsk->tasknum);
tsk               121 drivers/dma/bestcomm/fec.c 	var = (struct bcom_fec_rx_var *) bcom_task_var(tsk->tasknum);
tsk               122 drivers/dma/bestcomm/fec.c 	inc = (struct bcom_fec_rx_inc *) bcom_task_inc(tsk->tasknum);
tsk               124 drivers/dma/bestcomm/fec.c 	if (bcom_load_image(tsk->tasknum, bcom_fec_rx_task))
tsk               128 drivers/dma/bestcomm/fec.c 				offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
tsk               130 drivers/dma/bestcomm/fec.c 	var->bd_base	= tsk->bd_pa;
tsk               131 drivers/dma/bestcomm/fec.c 	var->bd_last	= tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
tsk               132 drivers/dma/bestcomm/fec.c 	var->bd_start	= tsk->bd_pa;
tsk               140 drivers/dma/bestcomm/fec.c 	tsk->index = 0;
tsk               141 drivers/dma/bestcomm/fec.c 	tsk->outdex = 0;
tsk               143 drivers/dma/bestcomm/fec.c 	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
tsk               146 drivers/dma/bestcomm/fec.c 	bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_RX_BD_PRAGMA);
tsk               147 drivers/dma/bestcomm/fec.c 	bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
tsk               151 drivers/dma/bestcomm/fec.c 	out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum);	/* Clear ints */
tsk               158 drivers/dma/bestcomm/fec.c bcom_fec_rx_release(struct bcom_task *tsk)
tsk               161 drivers/dma/bestcomm/fec.c 	bcom_task_free(tsk);
tsk               189 drivers/dma/bestcomm/fec.c 	struct bcom_task *tsk;
tsk               192 drivers/dma/bestcomm/fec.c 	tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_fec_bd),
tsk               194 drivers/dma/bestcomm/fec.c 	if (!tsk)
tsk               197 drivers/dma/bestcomm/fec.c 	tsk->flags = BCOM_FLAGS_ENABLE_TASK;
tsk               199 drivers/dma/bestcomm/fec.c 	priv = tsk->priv;
tsk               202 drivers/dma/bestcomm/fec.c 	if (bcom_fec_tx_reset(tsk)) {
tsk               203 drivers/dma/bestcomm/fec.c 		bcom_task_free(tsk);
tsk               207 drivers/dma/bestcomm/fec.c 	return tsk;
tsk               212 drivers/dma/bestcomm/fec.c bcom_fec_tx_reset(struct bcom_task *tsk)
tsk               214 drivers/dma/bestcomm/fec.c 	struct bcom_fec_priv *priv = tsk->priv;
tsk               219 drivers/dma/bestcomm/fec.c 	bcom_disable_task(tsk->tasknum);
tsk               222 drivers/dma/bestcomm/fec.c 	var = (struct bcom_fec_tx_var *) bcom_task_var(tsk->tasknum);
tsk               223 drivers/dma/bestcomm/fec.c 	inc = (struct bcom_fec_tx_inc *) bcom_task_inc(tsk->tasknum);
tsk               225 drivers/dma/bestcomm/fec.c 	if (bcom_load_image(tsk->tasknum, bcom_fec_tx_task))
tsk               229 drivers/dma/bestcomm/fec.c 				offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
tsk               231 drivers/dma/bestcomm/fec.c 	var->DRD	= bcom_sram_va2pa(self_modified_drd(tsk->tasknum));
tsk               232 drivers/dma/bestcomm/fec.c 	var->bd_base	= tsk->bd_pa;
tsk               233 drivers/dma/bestcomm/fec.c 	var->bd_last	= tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
tsk               234 drivers/dma/bestcomm/fec.c 	var->bd_start	= tsk->bd_pa;
tsk               241 drivers/dma/bestcomm/fec.c 	tsk->index = 0;
tsk               242 drivers/dma/bestcomm/fec.c 	tsk->outdex = 0;
tsk               244 drivers/dma/bestcomm/fec.c 	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
tsk               247 drivers/dma/bestcomm/fec.c 	bcom_set_task_pragma(tsk->tasknum, BCOM_FEC_TX_BD_PRAGMA);
tsk               248 drivers/dma/bestcomm/fec.c 	bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
tsk               252 drivers/dma/bestcomm/fec.c 	out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum);	/* Clear ints */
tsk               259 drivers/dma/bestcomm/fec.c bcom_fec_tx_release(struct bcom_task *tsk)
tsk               262 drivers/dma/bestcomm/fec.c 	bcom_task_free(tsk);
tsk                88 drivers/dma/bestcomm/gen_bd.c 	struct bcom_task *tsk;
tsk                91 drivers/dma/bestcomm/gen_bd.c 	tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
tsk                93 drivers/dma/bestcomm/gen_bd.c 	if (!tsk)
tsk                96 drivers/dma/bestcomm/gen_bd.c 	tsk->flags = BCOM_FLAGS_NONE;
tsk                98 drivers/dma/bestcomm/gen_bd.c 	priv = tsk->priv;
tsk               104 drivers/dma/bestcomm/gen_bd.c 	if (bcom_gen_bd_rx_reset(tsk)) {
tsk               105 drivers/dma/bestcomm/gen_bd.c 		bcom_task_free(tsk);
tsk               109 drivers/dma/bestcomm/gen_bd.c 	return tsk;
tsk               114 drivers/dma/bestcomm/gen_bd.c bcom_gen_bd_rx_reset(struct bcom_task *tsk)
tsk               116 drivers/dma/bestcomm/gen_bd.c 	struct bcom_gen_bd_priv *priv = tsk->priv;
tsk               121 drivers/dma/bestcomm/gen_bd.c 	bcom_disable_task(tsk->tasknum);
tsk               124 drivers/dma/bestcomm/gen_bd.c 	var = (struct bcom_gen_bd_rx_var *) bcom_task_var(tsk->tasknum);
tsk               125 drivers/dma/bestcomm/gen_bd.c 	inc = (struct bcom_gen_bd_rx_inc *) bcom_task_inc(tsk->tasknum);
tsk               127 drivers/dma/bestcomm/gen_bd.c 	if (bcom_load_image(tsk->tasknum, bcom_gen_bd_rx_task))
tsk               131 drivers/dma/bestcomm/gen_bd.c 				offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
tsk               133 drivers/dma/bestcomm/gen_bd.c 	var->bd_base	= tsk->bd_pa;
tsk               134 drivers/dma/bestcomm/gen_bd.c 	var->bd_last	= tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
tsk               135 drivers/dma/bestcomm/gen_bd.c 	var->bd_start	= tsk->bd_pa;
tsk               142 drivers/dma/bestcomm/gen_bd.c 	tsk->index = 0;
tsk               143 drivers/dma/bestcomm/gen_bd.c 	tsk->outdex = 0;
tsk               145 drivers/dma/bestcomm/gen_bd.c 	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
tsk               148 drivers/dma/bestcomm/gen_bd.c 	bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_RX_BD_PRAGMA);
tsk               149 drivers/dma/bestcomm/gen_bd.c 	bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
tsk               152 drivers/dma/bestcomm/gen_bd.c 	bcom_set_initiator(tsk->tasknum, priv->initiator);
tsk               154 drivers/dma/bestcomm/gen_bd.c 	out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum);	/* Clear ints */
tsk               161 drivers/dma/bestcomm/gen_bd.c bcom_gen_bd_rx_release(struct bcom_task *tsk)
tsk               164 drivers/dma/bestcomm/gen_bd.c 	bcom_task_free(tsk);
tsk               173 drivers/dma/bestcomm/gen_bd.c 	struct bcom_task *tsk;
tsk               176 drivers/dma/bestcomm/gen_bd.c 	tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_gen_bd),
tsk               178 drivers/dma/bestcomm/gen_bd.c 	if (!tsk)
tsk               181 drivers/dma/bestcomm/gen_bd.c 	tsk->flags = BCOM_FLAGS_NONE;
tsk               183 drivers/dma/bestcomm/gen_bd.c 	priv = tsk->priv;
tsk               188 drivers/dma/bestcomm/gen_bd.c 	if (bcom_gen_bd_tx_reset(tsk)) {
tsk               189 drivers/dma/bestcomm/gen_bd.c 		bcom_task_free(tsk);
tsk               193 drivers/dma/bestcomm/gen_bd.c 	return tsk;
tsk               198 drivers/dma/bestcomm/gen_bd.c bcom_gen_bd_tx_reset(struct bcom_task *tsk)
tsk               200 drivers/dma/bestcomm/gen_bd.c 	struct bcom_gen_bd_priv *priv = tsk->priv;
tsk               205 drivers/dma/bestcomm/gen_bd.c 	bcom_disable_task(tsk->tasknum);
tsk               208 drivers/dma/bestcomm/gen_bd.c 	var = (struct bcom_gen_bd_tx_var *) bcom_task_var(tsk->tasknum);
tsk               209 drivers/dma/bestcomm/gen_bd.c 	inc = (struct bcom_gen_bd_tx_inc *) bcom_task_inc(tsk->tasknum);
tsk               211 drivers/dma/bestcomm/gen_bd.c 	if (bcom_load_image(tsk->tasknum, bcom_gen_bd_tx_task))
tsk               215 drivers/dma/bestcomm/gen_bd.c 				offsetof(struct mpc52xx_sdma, tcr[tsk->tasknum]);
tsk               217 drivers/dma/bestcomm/gen_bd.c 	var->bd_base	= tsk->bd_pa;
tsk               218 drivers/dma/bestcomm/gen_bd.c 	var->bd_last	= tsk->bd_pa + ((tsk->num_bd-1) * tsk->bd_size);
tsk               219 drivers/dma/bestcomm/gen_bd.c 	var->bd_start	= tsk->bd_pa;
tsk               226 drivers/dma/bestcomm/gen_bd.c 	tsk->index = 0;
tsk               227 drivers/dma/bestcomm/gen_bd.c 	tsk->outdex = 0;
tsk               229 drivers/dma/bestcomm/gen_bd.c 	memset(tsk->bd, 0x00, tsk->num_bd * tsk->bd_size);
tsk               232 drivers/dma/bestcomm/gen_bd.c 	bcom_set_task_pragma(tsk->tasknum, BCOM_GEN_TX_BD_PRAGMA);
tsk               233 drivers/dma/bestcomm/gen_bd.c 	bcom_set_task_auto_start(tsk->tasknum, tsk->tasknum);
tsk               236 drivers/dma/bestcomm/gen_bd.c 	bcom_set_initiator(tsk->tasknum, priv->initiator);
tsk               238 drivers/dma/bestcomm/gen_bd.c 	out_be32(&bcom_eng->regs->IntPend, 1<<tsk->tasknum);	/* Clear ints */
tsk               245 drivers/dma/bestcomm/gen_bd.c bcom_gen_bd_tx_release(struct bcom_task *tsk)
tsk               248 drivers/dma/bestcomm/gen_bd.c 	bcom_task_free(tsk);
tsk               827 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			struct task_struct *tsk;
tsk               841 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			tsk = kthread_run(active_engine, &threads[tmp],
tsk               843 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			if (IS_ERR(tsk)) {
tsk               844 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 				err = PTR_ERR(tsk);
tsk               848 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			threads[tmp].task = tsk;
tsk               849 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			get_task_struct(tsk);
tsk              1188 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	struct task_struct *tsk = NULL;
tsk              1280 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	tsk = kthread_run(fn, &arg, "igt/evict_vma");
tsk              1281 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (IS_ERR(tsk)) {
tsk              1282 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		err = PTR_ERR(tsk);
tsk              1283 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		tsk = NULL;
tsk              1286 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	get_task_struct(tsk);
tsk              1305 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 	if (tsk) {
tsk              1310 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 			err = kthread_stop(tsk);
tsk              1312 drivers/gpu/drm/i915/gt/selftest_hangcheck.c 		put_task_struct(tsk);
tsk              1510 drivers/gpu/drm/i915/gt/selftest_lrc.c 	struct task_struct *tsk[I915_NUM_ENGINES] = {};
tsk              1526 drivers/gpu/drm/i915/gt/selftest_lrc.c 		tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
tsk              1528 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR(tsk[id])) {
tsk              1529 drivers/gpu/drm/i915/gt/selftest_lrc.c 			err = PTR_ERR(tsk[id]);
tsk              1532 drivers/gpu/drm/i915/gt/selftest_lrc.c 		get_task_struct(tsk[id]);
tsk              1539 drivers/gpu/drm/i915/gt/selftest_lrc.c 		if (IS_ERR_OR_NULL(tsk[id]))
tsk              1542 drivers/gpu/drm/i915/gt/selftest_lrc.c 		status = kthread_stop(tsk[id]);
tsk              1548 drivers/gpu/drm/i915/gt/selftest_lrc.c 		put_task_struct(tsk[id]);
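
A sketch of the kthread ownership pattern these i915 selftests repeat: get_task_struct() pins the task_struct so that kthread_stop() and the final put stay safe even if the thread function returns before we get around to stopping it. run_worker is a hypothetical wrapper, not a tree function.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>

static int run_worker(int (*fn)(void *), void *data)
{
	struct task_struct *tsk;
	int err;

	tsk = kthread_run(fn, data, "igt/worker");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	get_task_struct(tsk);	/* keep task_struct alive past thread exit */

	/* ... let the worker run ... */

	err = kthread_stop(tsk);	/* returns the thread's exit code */
	put_task_struct(tsk);
	return err;
}
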
tsk              1384 drivers/gpu/drm/i915/i915_request.c 	struct task_struct *tsk;
tsk              1391 drivers/gpu/drm/i915/i915_request.c 	wake_up_process(wait->tsk);
tsk              1483 drivers/gpu/drm/i915/i915_request.c 	wait.tsk = current;
tsk              1180 drivers/gpu/drm/i915/selftests/i915_request.c 			struct task_struct *tsk;
tsk              1182 drivers/gpu/drm/i915/selftests/i915_request.c 			tsk = kthread_run(__igt_breadcrumbs_smoketest,
tsk              1184 drivers/gpu/drm/i915/selftests/i915_request.c 			if (IS_ERR(tsk)) {
tsk              1185 drivers/gpu/drm/i915/selftests/i915_request.c 				ret = PTR_ERR(tsk);
tsk              1190 drivers/gpu/drm/i915/selftests/i915_request.c 			get_task_struct(tsk);
tsk              1191 drivers/gpu/drm/i915/selftests/i915_request.c 			threads[id * ncpus + n] = tsk;
tsk              1203 drivers/gpu/drm/i915/selftests/i915_request.c 			struct task_struct *tsk = threads[id * ncpus + n];
tsk              1206 drivers/gpu/drm/i915/selftests/i915_request.c 			if (!tsk)
tsk              1209 drivers/gpu/drm/i915/selftests/i915_request.c 			err = kthread_stop(tsk);
tsk              1213 drivers/gpu/drm/i915/selftests/i915_request.c 			put_task_struct(tsk);
tsk               423 drivers/md/md.c 	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
tsk              6097 drivers/md/md.c 		wake_up_process(mddev->sync_thread->tsk);
tsk              6161 drivers/md/md.c 		wake_up_process(mddev->sync_thread->tsk);
tsk              7719 drivers/md/md.c 		pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
tsk              7740 drivers/md/md.c 	thread->tsk = kthread_run(md_thread, thread,
tsk              7744 drivers/md/md.c 	if (IS_ERR(thread->tsk)) {
tsk              7757 drivers/md/md.c 	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
tsk              7765 drivers/md/md.c 	kthread_stop(thread->tsk);
tsk               670 drivers/md/md.h 	struct task_struct	*tsk;
tsk              1586 drivers/md/raid5-cache.c 		kthread_park(log->reclaim_thread->tsk);
tsk              1590 drivers/md/raid5-cache.c 		kthread_unpark(log->reclaim_thread->tsk);
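
raid5-cache parks rather than stops its reclaim thread across a quiesce: parking halts the kthread at a safe point without tearing it down, so resuming is just kthread_unpark(). A minimal sketch, with quiesce_reclaim as a hypothetical helper:

#include <linux/kthread.h>

static void quiesce_reclaim(struct task_struct *reclaim_tsk, bool quiesce)
{
	if (quiesce)
		kthread_park(reclaim_tsk);	/* blocks until parked */
	else
		kthread_unpark(reclaim_tsk);
}
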
tsk                66 drivers/net/ppp/ppp_async.c 	struct tasklet_struct tsk;
tsk               182 drivers/net/ppp/ppp_async.c 	tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
tsk               235 drivers/net/ppp/ppp_async.c 	tasklet_kill(&ap->tsk);
tsk               353 drivers/net/ppp/ppp_async.c 		tasklet_schedule(&ap->tsk);
tsk               367 drivers/net/ppp/ppp_async.c 	tasklet_schedule(&ap->tsk);
tsk                70 drivers/net/ppp/ppp_synctty.c 	struct tasklet_struct tsk;
tsk               180 drivers/net/ppp/ppp_synctty.c 	tasklet_init(&ap->tsk, ppp_sync_process, (unsigned long) ap);
tsk               234 drivers/net/ppp/ppp_synctty.c 	tasklet_kill(&ap->tsk);
tsk               346 drivers/net/ppp/ppp_synctty.c 		tasklet_schedule(&ap->tsk);
tsk               360 drivers/net/ppp/ppp_synctty.c 	tasklet_schedule(&ap->tsk);
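
Both PPP line disciplines above use the same (legacy) tasklet pattern: init with a context pointer cast to unsigned long, schedule from the receive/wakeup paths, and kill on close before the context is freed. A self-contained sketch with hypothetical my_* names:

#include <linux/interrupt.h>

struct my_ctx {
	struct tasklet_struct tsk;
};

static void my_bh(unsigned long arg)
{
	struct my_ctx *ctx = (struct my_ctx *)arg;

	/* deferred work on ctx runs here, in softirq context */
}

static void my_open(struct my_ctx *ctx)
{
	tasklet_init(&ctx->tsk, my_bh, (unsigned long)ctx);
}

static void my_rx(struct my_ctx *ctx)
{
	tasklet_schedule(&ctx->tsk);	/* my_bh() runs once, soon */
}

static void my_close(struct my_ctx *ctx)
{
	tasklet_kill(&ctx->tsk);	/* waits out a running instance */
}
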
tsk               266 drivers/oprofile/cpu_buffer.c 	struct task_struct *tsk = task ? task : current;
tsk               274 drivers/oprofile/cpu_buffer.c 	if (op_add_code(cpu_buf, backtrace, is_kernel, tsk))
tsk               105 drivers/pcmcia/cs.c 	struct task_struct *tsk;
tsk               169 drivers/pcmcia/cs.c 	tsk = kthread_run(pccardd, socket, "pccardd");
tsk               170 drivers/pcmcia/cs.c 	if (IS_ERR(tsk)) {
tsk               171 drivers/pcmcia/cs.c 		ret = PTR_ERR(tsk);
tsk                55 drivers/powercap/idle_inject.c 	struct task_struct *tsk;
tsk                91 drivers/powercap/idle_inject.c 		wake_up_process(iit->tsk);
tsk               243 drivers/powercap/idle_inject.c 		wait_task_inactive(iit->tsk, 0);
tsk               346 drivers/powercap/idle_inject.c 	.store = &idle_inject_thread.tsk,
tsk              2382 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	struct srp_tsk_mgmt *tsk;
tsk              2416 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 			tsk = &vio_iu(iue)->srp.tsk_mgmt;
tsk              2418 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 				tsk->tag, tsk->tag);
tsk              2419 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 			cmd->rsp.tag = tsk->tag;
tsk              2477 drivers/scsi/qla2xxx/qla_iocb.c qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk              2490 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
tsk              2491 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->entry_count = 1;
tsk              2492 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
tsk              2493 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
tsk              2494 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk              2495 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->control_flags = cpu_to_le32(flags);
tsk              2496 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->port_id[0] = fcport->d_id.b.al_pa;
tsk              2497 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->port_id[1] = fcport->d_id.b.area;
tsk              2498 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->port_id[2] = fcport->d_id.b.domain;
tsk              2499 drivers/scsi/qla2xxx/qla_iocb.c 	tsk->vp_index = fcport->vha->vp_idx;
tsk              2502 drivers/scsi/qla2xxx/qla_iocb.c 		int_to_scsilun(lun, &tsk->lun);
tsk              2503 drivers/scsi/qla2xxx/qla_iocb.c 		host_to_fcp_swap((uint8_t *)&tsk->lun,
tsk              2504 drivers/scsi/qla2xxx/qla_iocb.c 			sizeof(tsk->lun));
tsk              1812 drivers/scsi/qla2xxx/qla_isr.c qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
tsk              1819 drivers/scsi/qla2xxx/qla_isr.c 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
tsk              1821 drivers/scsi/qla2xxx/qla_isr.c 	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
tsk              1862 drivers/scsi/qla2xxx/qla_isr.c     void *tsk, srb_t *sp)
tsk              1866 drivers/scsi/qla2xxx/qla_isr.c 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
tsk              3187 drivers/scsi/qla2xxx/qla_mbx.c 		struct tsk_mgmt_entry tsk;
tsk              3197 drivers/scsi/qla2xxx/qla_mbx.c 	struct tsk_mgmt_cmd *tsk;
tsk              3218 drivers/scsi/qla2xxx/qla_mbx.c 	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
tsk              3219 drivers/scsi/qla2xxx/qla_mbx.c 	if (tsk == NULL) {
tsk              3225 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
tsk              3226 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.entry_count = 1;
tsk              3227 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
tsk              3228 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
tsk              3229 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
tsk              3230 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.control_flags = cpu_to_le32(type);
tsk              3231 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
tsk              3232 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
tsk              3233 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
tsk              3234 drivers/scsi/qla2xxx/qla_mbx.c 	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
tsk              3236 drivers/scsi/qla2xxx/qla_mbx.c 		int_to_scsilun(l, &tsk->p.tsk.lun);
tsk              3237 drivers/scsi/qla2xxx/qla_mbx.c 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
tsk              3238 drivers/scsi/qla2xxx/qla_mbx.c 		    sizeof(tsk->p.tsk.lun));
tsk              3241 drivers/scsi/qla2xxx/qla_mbx.c 	sts = &tsk->p.sts;
tsk              3242 drivers/scsi/qla2xxx/qla_mbx.c 	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
tsk              3282 drivers/scsi/qla2xxx/qla_mbx.c 	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
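
The qla_mbx.c entries show the usual dma_pool discipline around a task-management IOCB: zero-allocate a coherent buffer, fill it with cpu_to_le*() conversions, hand the bus address to the firmware, then return the buffer to the pool. A hedged sketch; 'pool' and issue_iocb() stand in for the driver-specific pieces and are not real qla2xxx symbols:

#include <linux/errno.h>
#include <linux/dmapool.h>

static int send_mgmt_iocb(struct dma_pool *pool)
{
	dma_addr_t iocb_dma;
	void *iocb;
	int rval;

	iocb = dma_pool_zalloc(pool, GFP_KERNEL, &iocb_dma);
	if (!iocb)
		return -ENOMEM;

	/* fill IOCB fields here, byte-swapping with cpu_to_le16/32() */

	rval = issue_iocb(iocb, iocb_dma);	/* hypothetical submit helper */

	dma_pool_free(pool, iocb, iocb_dma);
	return rval;
}
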
tsk               325 drivers/tty/tty_jobctrl.c 	struct task_struct *tsk = current;
tsk               327 drivers/tty/tty_jobctrl.c 	proc_clear_tty(tsk);
tsk                77 drivers/tty/tty_ldsem.c 	struct task_struct *tsk;
tsk                95 drivers/tty/tty_ldsem.c 		tsk = waiter->task;
tsk                97 drivers/tty/tty_ldsem.c 		wake_up_process(tsk);
tsk                98 drivers/tty/tty_ldsem.c 		put_task_struct(tsk);
tsk              2135 fs/binfmt_elf.c static struct vm_area_struct *first_vma(struct task_struct *tsk,
tsk              2138 fs/binfmt_elf.c 	struct vm_area_struct *ret = tsk->mm->mmap;
tsk               143 fs/btrfs/inode-map.c 	struct task_struct *tsk;
tsk               182 fs/btrfs/inode-map.c 	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
tsk               184 fs/btrfs/inode-map.c 	if (IS_ERR(tsk))
tsk              4283 fs/btrfs/volumes.c 	struct task_struct *tsk;
tsk              4306 fs/btrfs/volumes.c 	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
tsk              4307 fs/btrfs/volumes.c 	return PTR_ERR_OR_ZERO(tsk);
tsk               686 fs/cifs/cifsglob.h 	struct task_struct *tsk;
tsk              1308 fs/cifs/connect.c 	task_to_wake = xchg(&server->tsk, NULL);
tsk              2746 fs/cifs/connect.c 	task = xchg(&server->tsk, NULL);
tsk              2852 fs/cifs/connect.c 	tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
tsk              2854 fs/cifs/connect.c 	if (IS_ERR(tcp_ses->tsk)) {
tsk              2855 fs/cifs/connect.c 		rc = PTR_ERR(tcp_ses->tsk);
tsk               362 fs/coredump.c  static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
tsk               369 fs/coredump.c  	spin_lock_irq(&tsk->sighand->siglock);
tsk               370 fs/coredump.c  	if (!signal_group_exit(tsk->signal)) {
tsk               372 fs/coredump.c  		tsk->signal->group_exit_task = tsk;
tsk               373 fs/coredump.c  		nr = zap_process(tsk, exit_code, 0);
tsk               374 fs/coredump.c  		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
tsk               376 fs/coredump.c  	spin_unlock_irq(&tsk->sighand->siglock);
tsk               380 fs/coredump.c  	tsk->flags |= PF_DUMPCORE;
tsk               415 fs/coredump.c  		if (g == tsk->group_leader)
tsk               440 fs/coredump.c  	struct task_struct *tsk = current;
tsk               441 fs/coredump.c  	struct mm_struct *mm = tsk->mm;
tsk               445 fs/coredump.c  	core_state->dumper.task = tsk;
tsk               452 fs/coredump.c  		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
tsk              1012 fs/exec.c      	struct task_struct *tsk;
tsk              1016 fs/exec.c      	tsk = current;
tsk              1018 fs/exec.c      	exec_mm_release(tsk, old_mm);
tsk              1034 fs/exec.c      	task_lock(tsk);
tsk              1035 fs/exec.c      	active_mm = tsk->active_mm;
tsk              1037 fs/exec.c      	tsk->mm = mm;
tsk              1038 fs/exec.c      	tsk->active_mm = mm;
tsk              1040 fs/exec.c      	tsk->mm->vmacache_seqnum = 0;
tsk              1041 fs/exec.c      	vmacache_flush(tsk);
tsk              1042 fs/exec.c      	task_unlock(tsk);
tsk              1046 fs/exec.c      		setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
tsk              1061 fs/exec.c      static int de_thread(struct task_struct *tsk)
tsk              1063 fs/exec.c      	struct signal_struct *sig = tsk->signal;
tsk              1064 fs/exec.c      	struct sighand_struct *oldsighand = tsk->sighand;
tsk              1067 fs/exec.c      	if (thread_group_empty(tsk))
tsk              1083 fs/exec.c      	sig->group_exit_task = tsk;
tsk              1084 fs/exec.c      	sig->notify_count = zap_other_threads(tsk);
tsk              1085 fs/exec.c      	if (!thread_group_leader(tsk))
tsk              1092 fs/exec.c      		if (__fatal_signal_pending(tsk))
tsk              1103 fs/exec.c      	if (!thread_group_leader(tsk)) {
tsk              1104 fs/exec.c      		struct task_struct *leader = tsk->group_leader;
tsk              1107 fs/exec.c      			cgroup_threadgroup_change_begin(tsk);
tsk              1118 fs/exec.c      			cgroup_threadgroup_change_end(tsk);
tsk              1120 fs/exec.c      			if (__fatal_signal_pending(tsk))
tsk              1134 fs/exec.c      		tsk->start_time = leader->start_time;
tsk              1135 fs/exec.c      		tsk->real_start_time = leader->real_start_time;
tsk              1137 fs/exec.c      		BUG_ON(!same_thread_group(leader, tsk));
tsk              1138 fs/exec.c      		BUG_ON(has_group_leader_pid(tsk));
tsk              1151 fs/exec.c      		tsk->pid = leader->pid;
tsk              1152 fs/exec.c      		change_pid(tsk, PIDTYPE_PID, task_pid(leader));
tsk              1153 fs/exec.c      		transfer_pid(leader, tsk, PIDTYPE_TGID);
tsk              1154 fs/exec.c      		transfer_pid(leader, tsk, PIDTYPE_PGID);
tsk              1155 fs/exec.c      		transfer_pid(leader, tsk, PIDTYPE_SID);
tsk              1157 fs/exec.c      		list_replace_rcu(&leader->tasks, &tsk->tasks);
tsk              1158 fs/exec.c      		list_replace_init(&leader->sibling, &tsk->sibling);
tsk              1160 fs/exec.c      		tsk->group_leader = tsk;
tsk              1161 fs/exec.c      		leader->group_leader = tsk;
tsk              1163 fs/exec.c      		tsk->exit_signal = SIGCHLD;
tsk              1177 fs/exec.c      		cgroup_threadgroup_change_end(tsk);
tsk              1187 fs/exec.c      	tsk->exit_signal = SIGCHLD;
tsk              1210 fs/exec.c      		rcu_assign_pointer(tsk->sighand, newsighand);
tsk              1217 fs/exec.c      	BUG_ON(!thread_group_leader(tsk));
tsk              1229 fs/exec.c      char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
tsk              1231 fs/exec.c      	task_lock(tsk);
tsk              1232 fs/exec.c      	strncpy(buf, tsk->comm, buf_size);
tsk              1233 fs/exec.c      	task_unlock(tsk);
tsk              1243 fs/exec.c      void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
tsk              1245 fs/exec.c      	task_lock(tsk);
tsk              1246 fs/exec.c      	trace_task_rename(tsk, buf);
tsk              1247 fs/exec.c      	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
tsk              1248 fs/exec.c      	task_unlock(tsk);
tsk              1249 fs/exec.c      	perf_event_comm(tsk, exec);
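
The locked accessors above are normally reached through the set_task_comm()/get_task_comm() wrappers indexed further down in include/linux/sched.h; get_task_comm() requires a real array so that sizeof() yields the buffer length. A small usage sketch (report_task is hypothetical):

#include <linux/sched.h>

static void report_task(struct task_struct *p)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, p);		/* snapshots p->comm under task_lock() */
	pr_info("task %d is %s\n", task_pid_nr(p), comm);
}
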
tsk               427 fs/file.c      	struct task_struct *tsk = current;
tsk               430 fs/file.c      	old = tsk->files;
tsk               431 fs/file.c      	task_lock(tsk);
tsk               432 fs/file.c      	tsk->files = files;
tsk               433 fs/file.c      	task_unlock(tsk);
tsk               437 fs/file.c      void exit_files(struct task_struct *tsk)
tsk               439 fs/file.c      	struct files_struct * files = tsk->files;
tsk               442 fs/file.c      		task_lock(tsk);
tsk               443 fs/file.c      		tsk->files = NULL;
tsk               444 fs/file.c      		task_unlock(tsk);
tsk                95 fs/fs_struct.c void exit_fs(struct task_struct *tsk)
tsk                97 fs/fs_struct.c 	struct fs_struct *fs = tsk->fs;
tsk               101 fs/fs_struct.c 		task_lock(tsk);
tsk               103 fs/fs_struct.c 		tsk->fs = NULL;
tsk               106 fs/fs_struct.c 		task_unlock(tsk);
tsk                37 fs/jffs2/background.c 	struct task_struct *tsk;
tsk                45 fs/jffs2/background.c 	tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index);
tsk                46 fs/jffs2/background.c 	if (IS_ERR(tsk)) {
tsk                48 fs/jffs2/background.c 			-PTR_ERR(tsk));
tsk                50 fs/jffs2/background.c 		ret = PTR_ERR(tsk);
tsk                53 fs/jffs2/background.c 		jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid);
tsk                55 fs/jffs2/background.c 		ret = tsk->pid;
tsk               146 fs/proc/array.c static inline const char *get_task_state(struct task_struct *tsk)
tsk               149 fs/proc/array.c 	return task_state_array[task_state_index(tsk)];
tsk               340 fs/proc/base.c static ssize_t get_task_cmdline(struct task_struct *tsk, char __user *buf,
tsk               346 fs/proc/base.c 	mm = get_task_mm(tsk);
tsk               358 fs/proc/base.c 	struct task_struct *tsk;
tsk               363 fs/proc/base.c 	tsk = get_proc_task(file_inode(file));
tsk               364 fs/proc/base.c 	if (!tsk)
tsk               366 fs/proc/base.c 	ret = get_task_cmdline(tsk, buf, count, pos);
tsk               367 fs/proc/base.c 	put_task_struct(tsk);
tsk                16 include/asm-generic/mmu_context.h 			struct task_struct *tsk)
tsk                20 include/asm-generic/mmu_context.h static inline int init_new_context(struct task_struct *tsk,
tsk                37 include/asm-generic/mmu_context.h 			struct task_struct *tsk)
tsk               173 include/linux/audit.h static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
tsk               175 include/linux/audit.h 	return tsk->loginuid;
tsk               178 include/linux/audit.h static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
tsk               180 include/linux/audit.h 	return tsk->sessionid;
tsk               229 include/linux/audit.h static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
tsk               234 include/linux/audit.h static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
tsk               652 include/linux/audit.h static inline bool audit_loginuid_set(struct task_struct *tsk)
tsk               654 include/linux/audit.h 	return uid_valid(audit_get_loginuid(tsk));
tsk              1185 include/linux/blkdev.h static inline void blk_flush_plug(struct task_struct *tsk)
tsk              1187 include/linux/blkdev.h 	struct blk_plug *plug = tsk->plug;
tsk              1193 include/linux/blkdev.h static inline void blk_schedule_flush_plug(struct task_struct *tsk)
tsk              1195 include/linux/blkdev.h 	struct blk_plug *plug = tsk->plug;
tsk              1201 include/linux/blkdev.h static inline bool blk_needs_flush_plug(struct task_struct *tsk)
tsk              1203 include/linux/blkdev.h 	struct blk_plug *plug = tsk->plug;
tsk              1810 include/linux/blkdev.h static inline bool blk_needs_flush_plug(struct task_struct *tsk)
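
The plug helpers above only have work to do if a submitter batches I/O inside a plug; schedule() then drains it via blk_schedule_flush_plug(). A sketch of the submitting side, assuming a caller-supplied bio array:

#include <linux/blkdev.h>

static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* installs current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);		/* flushes everything plugged above */
}
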
tsk               253 include/linux/capability.h extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
tsk               738 include/linux/cgroup-defs.h static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
tsk               749 include/linux/cgroup-defs.h static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
tsk               758 include/linux/cgroup-defs.h static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
tsk               763 include/linux/cgroup-defs.h static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
tsk               122 include/linux/cgroup.h 		     struct pid *pid, struct task_struct *tsk);
tsk               762 include/linux/cgroup.h void cpuacct_charge(struct task_struct *tsk, u64 cputime);
tsk               763 include/linux/cgroup.h void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
tsk               765 include/linux/cgroup.h static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
tsk               766 include/linux/cgroup.h static inline void cpuacct_account_field(struct task_struct *tsk, int index,
tsk               102 include/linux/cpuset.h 			    struct pid *pid, struct task_struct *tsk);
tsk               206 include/linux/cred.h static inline void validate_creds_for_do_exit(struct task_struct *tsk)
tsk                97 include/linux/delayacct.h static inline void delayacct_tsk_init(struct task_struct *tsk)
tsk               100 include/linux/delayacct.h 	tsk->delays = NULL;
tsk               102 include/linux/delayacct.h 		__delayacct_tsk_init(tsk);
tsk               108 include/linux/delayacct.h static inline void delayacct_tsk_free(struct task_struct *tsk)
tsk               110 include/linux/delayacct.h 	if (tsk->delays)
tsk               111 include/linux/delayacct.h 		kmem_cache_free(delayacct_cache, tsk->delays);
tsk               112 include/linux/delayacct.h 	tsk->delays = NULL;
tsk               130 include/linux/delayacct.h 					struct task_struct *tsk)
tsk               132 include/linux/delayacct.h 	if (!delayacct_on || !tsk->delays)
tsk               134 include/linux/delayacct.h 	return __delayacct_add_tsk(d, tsk);
tsk               137 include/linux/delayacct.h static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
tsk               139 include/linux/delayacct.h 	if (tsk->delays)
tsk               140 include/linux/delayacct.h 		return __delayacct_blkio_ticks(tsk);
tsk               175 include/linux/delayacct.h static inline void delayacct_tsk_init(struct task_struct *tsk)
tsk               177 include/linux/delayacct.h static inline void delayacct_tsk_free(struct task_struct *tsk)
tsk               184 include/linux/delayacct.h 					struct task_struct *tsk)
tsk               186 include/linux/delayacct.h static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
tsk                24 include/linux/fsl/bestcomm/ata.h extern void bcom_ata_rx_prepare(struct bcom_task *tsk);
tsk                25 include/linux/fsl/bestcomm/ata.h extern void bcom_ata_tx_prepare(struct bcom_task *tsk);
tsk                26 include/linux/fsl/bestcomm/ata.h extern void bcom_ata_reset_bd(struct bcom_task *tsk);
tsk                27 include/linux/fsl/bestcomm/ata.h extern void bcom_ata_release(struct bcom_task *tsk);
tsk                73 include/linux/fsl/bestcomm/bestcomm.h extern void bcom_enable(struct bcom_task *tsk);
tsk                82 include/linux/fsl/bestcomm/bestcomm.h extern void bcom_disable(struct bcom_task *tsk);
tsk                90 include/linux/fsl/bestcomm/bestcomm.h bcom_get_task_irq(struct bcom_task *tsk) {
tsk                91 include/linux/fsl/bestcomm/bestcomm.h 	return tsk->irq;
tsk               106 include/linux/fsl/bestcomm/bestcomm.h _bcom_next_index(struct bcom_task *tsk)
tsk               108 include/linux/fsl/bestcomm/bestcomm.h 	return ((tsk->index + 1) == tsk->num_bd) ? 0 : tsk->index + 1;
tsk               117 include/linux/fsl/bestcomm/bestcomm.h _bcom_next_outdex(struct bcom_task *tsk)
tsk               119 include/linux/fsl/bestcomm/bestcomm.h 	return ((tsk->outdex + 1) == tsk->num_bd) ? 0 : tsk->outdex + 1;
tsk               127 include/linux/fsl/bestcomm/bestcomm.h bcom_queue_empty(struct bcom_task *tsk)
tsk               129 include/linux/fsl/bestcomm/bestcomm.h 	return tsk->index == tsk->outdex;
tsk               137 include/linux/fsl/bestcomm/bestcomm.h bcom_queue_full(struct bcom_task *tsk)
tsk               139 include/linux/fsl/bestcomm/bestcomm.h 	return tsk->outdex == _bcom_next_index(tsk);
tsk               148 include/linux/fsl/bestcomm/bestcomm.h *bcom_get_bd(struct bcom_task *tsk, unsigned int index)
tsk               152 include/linux/fsl/bestcomm/bestcomm.h 	return ((void *)tsk->bd) + (index * tsk->bd_size);
tsk               160 include/linux/fsl/bestcomm/bestcomm.h bcom_buffer_done(struct bcom_task *tsk)
tsk               163 include/linux/fsl/bestcomm/bestcomm.h 	if (bcom_queue_empty(tsk))
tsk               166 include/linux/fsl/bestcomm/bestcomm.h 	bd = bcom_get_bd(tsk, tsk->outdex);
tsk               177 include/linux/fsl/bestcomm/bestcomm.h bcom_prepare_next_buffer(struct bcom_task *tsk)
tsk               181 include/linux/fsl/bestcomm/bestcomm.h 	bd = bcom_get_bd(tsk, tsk->index);
tsk               187 include/linux/fsl/bestcomm/bestcomm.h bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie)
tsk               189 include/linux/fsl/bestcomm/bestcomm.h 	struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index);
tsk               191 include/linux/fsl/bestcomm/bestcomm.h 	tsk->cookie[tsk->index] = cookie;
tsk               194 include/linux/fsl/bestcomm/bestcomm.h 	tsk->index = _bcom_next_index(tsk);
tsk               195 include/linux/fsl/bestcomm/bestcomm.h 	if (tsk->flags & BCOM_FLAGS_ENABLE_TASK)
tsk               196 include/linux/fsl/bestcomm/bestcomm.h 		bcom_enable(tsk);
tsk               200 include/linux/fsl/bestcomm/bestcomm.h bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd)
tsk               202 include/linux/fsl/bestcomm/bestcomm.h 	void *cookie = tsk->cookie[tsk->outdex];
tsk               203 include/linux/fsl/bestcomm/bestcomm.h 	struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex);
tsk               209 include/linux/fsl/bestcomm/bestcomm.h 	tsk->outdex = _bcom_next_outdex(tsk);
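
The inline helpers above implement a single-producer/single-consumer ring: index is the producer cursor, outdex the consumer cursor, and one slot is sacrificed so that full can be told from empty. A sketch of both sides using only the helpers indexed here (my_submit/my_complete are hypothetical):

#include <linux/errno.h>
#include <linux/fsl/bestcomm/bestcomm.h>

static int my_submit(struct bcom_task *t, void *cookie)
{
	struct bcom_bd *bd;

	if (bcom_queue_full(t))
		return -EBUSY;

	bd = bcom_prepare_next_buffer(t);
	/* fill the descriptor for the engine here */
	bcom_submit_next_buffer(t, cookie);	/* advances index, may auto-enable */
	return 0;
}

static void *my_complete(struct bcom_task *t)
{
	struct bcom_bd *bd;
	u32 status;

	if (!bcom_buffer_done(t))
		return NULL;	/* nothing finished yet */

	return bcom_retrieve_buffer(t, &status, &bd);	/* advances outdex */
}
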
tsk               237 include/linux/fsl/bestcomm/bestcomm_priv.h extern void bcom_task_free(struct bcom_task *tsk);
tsk                44 include/linux/fsl/bestcomm/fec.h bcom_fec_rx_reset(struct bcom_task *tsk);
tsk                47 include/linux/fsl/bestcomm/fec.h bcom_fec_rx_release(struct bcom_task *tsk);
tsk                54 include/linux/fsl/bestcomm/fec.h bcom_fec_tx_reset(struct bcom_task *tsk);
tsk                57 include/linux/fsl/bestcomm/fec.h bcom_fec_tx_release(struct bcom_task *tsk);
tsk                24 include/linux/fsl/bestcomm/gen_bd.h bcom_gen_bd_rx_reset(struct bcom_task *tsk);
tsk                27 include/linux/fsl/bestcomm/gen_bd.h bcom_gen_bd_rx_release(struct bcom_task *tsk);
tsk                35 include/linux/fsl/bestcomm/gen_bd.h bcom_gen_bd_tx_reset(struct bcom_task *tsk);
tsk                38 include/linux/fsl/bestcomm/gen_bd.h bcom_gen_bd_tx_release(struct bcom_task *tsk);
tsk               858 include/linux/ftrace.h static inline void set_tsk_trace_trace(struct task_struct *tsk)
tsk               860 include/linux/ftrace.h 	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
tsk               863 include/linux/ftrace.h static inline void clear_tsk_trace_trace(struct task_struct *tsk)
tsk               865 include/linux/ftrace.h 	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
tsk               868 include/linux/ftrace.h static inline int test_tsk_trace_trace(struct task_struct *tsk)
tsk               870 include/linux/ftrace.h 	return tsk->trace & TSK_TRACE_FL_TRACE;
tsk               873 include/linux/ftrace.h static inline void set_tsk_trace_graph(struct task_struct *tsk)
tsk               875 include/linux/ftrace.h 	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
tsk               878 include/linux/ftrace.h static inline void clear_tsk_trace_graph(struct task_struct *tsk)
tsk               880 include/linux/ftrace.h 	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
tsk               883 include/linux/ftrace.h static inline int test_tsk_trace_graph(struct task_struct *tsk)
tsk               885 include/linux/ftrace.h 	return tsk->trace & TSK_TRACE_FL_GRAPH;
tsk                62 include/linux/futex.h static inline void futex_init_task(struct task_struct *tsk)
tsk                64 include/linux/futex.h 	tsk->robust_list = NULL;
tsk                66 include/linux/futex.h 	tsk->compat_robust_list = NULL;
tsk                68 include/linux/futex.h 	INIT_LIST_HEAD(&tsk->pi_state_list);
tsk                69 include/linux/futex.h 	tsk->pi_state_cache = NULL;
tsk                70 include/linux/futex.h 	tsk->futex_state = FUTEX_STATE_OK;
tsk                71 include/linux/futex.h 	mutex_init(&tsk->futex_exit_mutex);
tsk                74 include/linux/futex.h void futex_exit_recursive(struct task_struct *tsk);
tsk                75 include/linux/futex.h void futex_exit_release(struct task_struct *tsk);
tsk                76 include/linux/futex.h void futex_exec_release(struct task_struct *tsk);
tsk                81 include/linux/futex.h static inline void futex_init_task(struct task_struct *tsk) { }
tsk                82 include/linux/futex.h static inline void futex_exit_recursive(struct task_struct *tsk) { }
tsk                83 include/linux/futex.h static inline void futex_exit_release(struct task_struct *tsk) { }
tsk                84 include/linux/futex.h static inline void futex_exec_release(struct task_struct *tsk) { }
tsk                51 include/linux/hw_breakpoint.h 			    struct task_struct *tsk);
tsk                84 include/linux/hw_breakpoint.h extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
tsk                99 include/linux/hw_breakpoint.h 			    struct task_struct *tsk)	{ return NULL; }
tsk               127 include/linux/hw_breakpoint.h static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk)	{ }
tsk                90 include/linux/kernel_stat.h static inline void account_process_tick(struct task_struct *tsk, int user)
tsk                92 include/linux/kernel_stat.h 	vtime_flush(tsk);
tsk               332 include/linux/kprobes.h void kretprobe_hash_lock(struct task_struct *tsk,
tsk               334 include/linux/kprobes.h void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags);
tsk               335 include/linux/kprobes.h struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
tsk                68 include/linux/kthread.h extern int tsk_fork_get_node(struct task_struct *tsk);
tsk               388 include/linux/lockdep.h #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
tsk               406 include/linux/lockdep.h #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
tsk               474 include/linux/lockdep.h #define lockdep_depth(tsk)	(0)
tsk               483 include/linux/lockdep.h #define lockdep_recursing(tsk)			(0)
tsk                84 include/linux/lsm_audit.h 		struct task_struct *tsk;
tsk              1614 include/linux/lsm_hooks.h 	int (*file_send_sigiotask)(struct task_struct *tsk,
tsk               146 include/linux/mempolicy.h extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
tsk               153 include/linux/mempolicy.h extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
tsk               262 include/linux/mempolicy.h static inline void mpol_rebind_task(struct task_struct *tsk,
tsk              1490 include/linux/mm.h extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
tsk              1505 include/linux/mm.h static inline int fixup_user_fault(struct task_struct *tsk,
tsk              1525 include/linux/mm.h extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
tsk              1529 include/linux/mm.h extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
tsk              1532 include/linux/mm.h long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
tsk              2428 include/linux/mm.h void task_dirty_inc(struct task_struct *tsk);
tsk                68 include/linux/nsproxy.h int copy_namespaces(unsigned long flags, struct task_struct *tsk);
tsk                69 include/linux/nsproxy.h void exit_task_namespaces(struct task_struct *tsk);
tsk                70 include/linux/nsproxy.h void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
tsk                74 include/linux/oom.h static inline bool tsk_is_oom_victim(struct task_struct * tsk)
tsk                76 include/linux/oom.h 	return tsk->signal->oom_mm;
tsk              1183 include/linux/perf_event.h extern void perf_event_comm(struct task_struct *tsk, bool exec);
tsk              1184 include/linux/perf_event.h extern void perf_event_namespaces(struct task_struct *tsk);
tsk              1185 include/linux/perf_event.h extern void perf_event_fork(struct task_struct *tsk);
tsk              1406 include/linux/perf_event.h static inline void perf_event_comm(struct task_struct *tsk, bool exec)	{ }
tsk              1407 include/linux/perf_event.h static inline void perf_event_namespaces(struct task_struct *tsk)	{ }
tsk              1408 include/linux/perf_event.h static inline void perf_event_fork(struct task_struct *tsk)		{ }
tsk                99 include/linux/pid_namespace.h extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
tsk                36 include/linux/pkeys.h static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
tsk                20 include/linux/ptrace.h extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
tsk                58 include/linux/ptrace.h extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
tsk                59 include/linux/ptrace.h extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
tsk               108 include/linux/ptrace.h int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
tsk               110 include/linux/ptrace.h int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
tsk               239 include/linux/regset.h const struct user_regset_view *task_user_regset_view(struct task_struct *tsk);
tsk                11 include/linux/resource.h int do_prlimit(struct task_struct *tsk, unsigned int resource,
tsk                71 include/linux/rtmutex.h  extern void rt_mutex_debug_task_free(struct task_struct *tsk);
tsk              1306 include/linux/sched.h static inline pid_t task_pid_nr(struct task_struct *tsk)
tsk              1308 include/linux/sched.h 	return tsk->pid;
tsk              1311 include/linux/sched.h static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
tsk              1313 include/linux/sched.h 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
tsk              1316 include/linux/sched.h static inline pid_t task_pid_vnr(struct task_struct *tsk)
tsk              1318 include/linux/sched.h 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
tsk              1322 include/linux/sched.h static inline pid_t task_tgid_nr(struct task_struct *tsk)
tsk              1324 include/linux/sched.h 	return tsk->tgid;
tsk              1342 include/linux/sched.h static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
tsk              1344 include/linux/sched.h 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
tsk              1347 include/linux/sched.h static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
tsk              1349 include/linux/sched.h 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
tsk              1353 include/linux/sched.h static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
tsk              1355 include/linux/sched.h 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
tsk              1358 include/linux/sched.h static inline pid_t task_session_vnr(struct task_struct *tsk)
tsk              1360 include/linux/sched.h 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
tsk              1363 include/linux/sched.h static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
tsk              1365 include/linux/sched.h 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
tsk              1368 include/linux/sched.h static inline pid_t task_tgid_vnr(struct task_struct *tsk)
tsk              1370 include/linux/sched.h 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
tsk              1373 include/linux/sched.h static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
tsk              1378 include/linux/sched.h 	if (pid_alive(tsk))
tsk              1379 include/linux/sched.h 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
tsk              1385 include/linux/sched.h static inline pid_t task_ppid_nr(const struct task_struct *tsk)
tsk              1387 include/linux/sched.h 	return task_ppid_nr_ns(tsk, &init_pid_ns);
tsk              1391 include/linux/sched.h static inline pid_t task_pgrp_nr(struct task_struct *tsk)
tsk              1393 include/linux/sched.h 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
tsk              1399 include/linux/sched.h static inline unsigned int task_state_index(struct task_struct *tsk)
tsk              1401 include/linux/sched.h 	unsigned int tsk_state = READ_ONCE(tsk->state);
tsk              1402 include/linux/sched.h 	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
tsk              1421 include/linux/sched.h static inline char task_state_to_char(struct task_struct *tsk)
tsk              1423 include/linux/sched.h 	return task_index_to_char(task_state_index(tsk));
tsk              1435 include/linux/sched.h static inline int is_global_init(struct task_struct *tsk)
tsk              1437 include/linux/sched.h 	return task_tgid_nr(tsk) == 1;
tsk              1673 include/linux/sched.h extern int wake_up_state(struct task_struct *tsk, unsigned int state);
tsk              1674 include/linux/sched.h extern int wake_up_process(struct task_struct *tsk);
tsk              1675 include/linux/sched.h extern void wake_up_new_task(struct task_struct *tsk);
tsk              1678 include/linux/sched.h extern void kick_process(struct task_struct *tsk);
tsk              1680 include/linux/sched.h static inline void kick_process(struct task_struct *tsk) { }
tsk              1683 include/linux/sched.h extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
tsk              1685 include/linux/sched.h static inline void set_task_comm(struct task_struct *tsk, const char *from)
tsk              1687 include/linux/sched.h 	__set_task_comm(tsk, from, false);
tsk              1690 include/linux/sched.h extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
tsk              1691 include/linux/sched.h #define get_task_comm(buf, tsk) ({			\
tsk              1693 include/linux/sched.h 	__get_task_comm(buf, sizeof(buf), tsk);		\
tsk              1711 include/linux/sched.h static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
tsk              1713 include/linux/sched.h 	set_ti_thread_flag(task_thread_info(tsk), flag);
tsk              1716 include/linux/sched.h static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
tsk              1718 include/linux/sched.h 	clear_ti_thread_flag(task_thread_info(tsk), flag);
tsk              1721 include/linux/sched.h static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
tsk              1724 include/linux/sched.h 	update_ti_thread_flag(task_thread_info(tsk), flag, value);
tsk              1727 include/linux/sched.h static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
tsk              1729 include/linux/sched.h 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
tsk              1732 include/linux/sched.h static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
tsk              1734 include/linux/sched.h 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
tsk              1737 include/linux/sched.h static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
tsk              1739 include/linux/sched.h 	return test_ti_thread_flag(task_thread_info(tsk), flag);
tsk              1742 include/linux/sched.h static inline void set_tsk_need_resched(struct task_struct *tsk)
tsk              1744 include/linux/sched.h 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
tsk              1747 include/linux/sched.h static inline void clear_tsk_need_resched(struct task_struct *tsk)
tsk              1749 include/linux/sched.h 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
tsk              1752 include/linux/sched.h static inline int test_tsk_need_resched(struct task_struct *tsk)
tsk              1754 include/linux/sched.h 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
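
These wrappers just forward to the thread_info bitops, so any TIF_* flag can be tested or poked per-task; the coredump path earlier in this listing uses clear_tsk_thread_flag(tsk, TIF_SIGPENDING) the same way. A one-line sketch, essentially what signal_pending() expands to:

#include <linux/sched.h>

static bool has_pending_signal(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_SIGPENDING);
}
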
tsk              1857 include/linux/sched.h #define TASK_SIZE_OF(tsk)	TASK_SIZE
tsk              1968 include/linux/sched.h void __exit_umh(struct task_struct *tsk);
tsk              1970 include/linux/sched.h static inline void exit_umh(struct task_struct *tsk)
tsk              1972 include/linux/sched.h 	if (unlikely(tsk->flags & PF_UMH))
tsk              1973 include/linux/sched.h 		__exit_umh(tsk);
tsk                63 include/linux/sched/cputime.h void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
tsk                64 include/linux/sched/cputime.h void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples);
tsk                79 include/linux/sched/cputime.h struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
tsk                81 include/linux/sched/cputime.h 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
tsk                87 include/linux/sched/cputime.h 	if (!READ_ONCE(tsk->signal->posix_cputimers.timers_active))
tsk               104 include/linux/sched/cputime.h 	if (unlikely(!tsk->sighand))
tsk               111 include/linux/sched/cputime.h struct thread_group_cputimer *get_running_cputimer(struct task_struct *tsk)
tsk               127 include/linux/sched/cputime.h static inline void account_group_user_time(struct task_struct *tsk,
tsk               130 include/linux/sched/cputime.h 	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);
tsk               148 include/linux/sched/cputime.h static inline void account_group_system_time(struct task_struct *tsk,
tsk               151 include/linux/sched/cputime.h 	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);
tsk               169 include/linux/sched/cputime.h static inline void account_group_exec_runtime(struct task_struct *tsk,
tsk               172 include/linux/sched/cputime.h 	struct thread_group_cputimer *cputimer = get_running_cputimer(tsk);
tsk               148 include/linux/sched/mm.h static inline bool in_vfork(struct task_struct *tsk)
tsk               168 include/linux/sched/mm.h 	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
tsk               381 include/linux/sched/mm.h 					     struct task_struct *tsk)
tsk                21 include/linux/sched/rt.h static inline bool task_is_realtime(struct task_struct *tsk)
tsk                23 include/linux/sched/rt.h 	int policy = tsk->policy;
tsk                42 include/linux/sched/rt.h static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
tsk                44 include/linux/sched/rt.h 	return tsk->pi_blocked_on != NULL;
tsk                52 include/linux/sched/rt.h static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
tsk                80 include/linux/sched/task.h extern void exit_thread(struct task_struct *tsk);
tsk                82 include/linux/sched/task.h static inline void exit_thread(struct task_struct *tsk)
tsk                99 include/linux/sched/task.h extern void free_task(struct task_struct *tsk);
tsk                62 include/linux/sched/task_stack.h static inline void *try_get_task_stack(struct task_struct *tsk)
tsk                64 include/linux/sched/task_stack.h 	return refcount_inc_not_zero(&tsk->stack_refcount) ?
tsk                65 include/linux/sched/task_stack.h 		task_stack_page(tsk) : NULL;
tsk                68 include/linux/sched/task_stack.h extern void put_task_stack(struct task_struct *tsk);
tsk                70 include/linux/sched/task_stack.h static inline void *try_get_task_stack(struct task_struct *tsk)
tsk                72 include/linux/sched/task_stack.h 	return task_stack_page(tsk);
tsk                75 include/linux/sched/task_stack.h static inline void put_task_stack(struct task_struct *tsk) {}
tsk               110 include/linux/sched/task_stack.h extern void set_task_stack_end_magic(struct task_struct *tsk);
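
With CONFIG_THREAD_INFO_IN_TASK the stack may be freed before the task_struct, so remote stack walkers must pin it first, as the helpers above make explicit. A sketch of the canonical pattern (walk_remote_stack is hypothetical):

#include <linux/sched/task_stack.h>

static void walk_remote_stack(struct task_struct *p)
{
	void *stack = try_get_task_stack(p);

	if (!stack)
		return;		/* task already exited; stack is gone */

	/* ... unwind p while the stack pages are pinned ... */

	put_task_stack(p);
}
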
tsk                12 include/linux/sched/xacct.h static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
tsk                14 include/linux/sched/xacct.h 	tsk->ioac.rchar += amt;
tsk                17 include/linux/sched/xacct.h static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
tsk                19 include/linux/sched/xacct.h 	tsk->ioac.wchar += amt;
tsk                22 include/linux/sched/xacct.h static inline void inc_syscr(struct task_struct *tsk)
tsk                24 include/linux/sched/xacct.h 	tsk->ioac.syscr++;
tsk                27 include/linux/sched/xacct.h static inline void inc_syscw(struct task_struct *tsk)
tsk                29 include/linux/sched/xacct.h 	tsk->ioac.syscw++;
tsk                32 include/linux/sched/xacct.h static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
tsk                36 include/linux/sched/xacct.h static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
tsk                40 include/linux/sched/xacct.h static inline void inc_syscr(struct task_struct *tsk)
tsk                44 include/linux/sched/xacct.h static inline void inc_syscw(struct task_struct *tsk)
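
The duplicated add_rchar()/add_wchar()/inc_syscr()/inc_syscw() definitions above are the standard config-stub idiom: one set does the ioac bookkeeping, the other set is empty for kernels built without extended task accounting. Reconstructed shape for one helper; the guard symbol CONFIG_TASK_XACCT is inferred from the file's purpose and does not appear in the excerpt:

	#ifdef CONFIG_TASK_XACCT
	static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
	{
		tsk->ioac.rchar += amt;	/* bytes read, charged to the task */
	}
	#else
	static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
	{
	}
	#endif
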
tsk                84 include/linux/seccomp.h extern void put_seccomp_filter(struct task_struct *tsk);
tsk                85 include/linux/seccomp.h extern void get_seccomp_filter(struct task_struct *tsk);
tsk                87 include/linux/seccomp.h static inline void put_seccomp_filter(struct task_struct *tsk)
tsk                91 include/linux/seccomp.h static inline void get_seccomp_filter(struct task_struct *tsk)
tsk               371 include/linux/security.h int security_file_send_sigiotask(struct task_struct *tsk,
tsk               942 include/linux/security.h static inline int security_file_send_sigiotask(struct task_struct *tsk,
tsk                16 include/linux/sem.h extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
tsk                17 include/linux/sem.h extern void exit_sem(struct task_struct *tsk);
tsk                25 include/linux/sem.h static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
tsk                30 include/linux/sem.h static inline void exit_sem(struct task_struct *tsk)
tsk               282 include/linux/signal.h extern void exit_signals(struct task_struct *tsk);
tsk               315 include/linux/signal.h extern bool unhandled_signal(struct task_struct *tsk, int sig);
tsk                19 include/linux/signalfd.h static inline void signalfd_notify(struct task_struct *tsk, int sig)
tsk                21 include/linux/signalfd.h 	if (unlikely(waitqueue_active(&tsk->sighand->signalfd_wqh)))
tsk                22 include/linux/signalfd.h 		wake_up(&tsk->sighand->signalfd_wqh);
tsk                29 include/linux/signalfd.h static inline void signalfd_notify(struct task_struct *tsk, int sig) { }
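
signalfd_notify() is the cheap-wakeup idiom: waitqueue_active() is a lockless peek, so the wake_up() and its queue lock are only paid when a signalfd reader is actually parked on sighand->signalfd_wqh, and the config-off stub above makes the call vanish entirely. Reconstructed from the lines above:

	static inline void signalfd_notify(struct task_struct *tsk, int sig)
	{
		/* Lockless peek first; take the wakeup slow path only if needed. */
		if (unlikely(waitqueue_active(&tsk->sighand->signalfd_wqh)))
			wake_up(&tsk->sighand->signalfd_wqh);
	}
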
tsk                73 include/linux/stacktrace.h extern void save_stack_trace_tsk(struct task_struct *tsk,
tsk                75 include/linux/stacktrace.h extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
tsk                82 include/linux/stacktrace.h int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
tsk                85 include/linux/stacktrace.h static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk,
tsk                28 include/linux/taskstats_kern.h static inline void taskstats_exit(struct task_struct *tsk, int group_dead)
tsk               201 include/linux/tick.h extern void tick_nohz_dep_set_task(struct task_struct *tsk,
tsk               203 include/linux/tick.h extern void tick_nohz_dep_clear_task(struct task_struct *tsk,
tsk               238 include/linux/tick.h static inline void tick_dep_set_task(struct task_struct *tsk,
tsk               242 include/linux/tick.h 		tick_nohz_dep_set_task(tsk, bit);
tsk               244 include/linux/tick.h static inline void tick_dep_clear_task(struct task_struct *tsk,
tsk               248 include/linux/tick.h 		tick_nohz_dep_clear_task(tsk, bit);
tsk               275 include/linux/tick.h static inline void tick_dep_set_task(struct task_struct *tsk,
tsk               277 include/linux/tick.h static inline void tick_dep_clear_task(struct task_struct *tsk,
tsk                16 include/linux/tsacct_kern.h 			  struct taskstats *stats, struct task_struct *tsk);
tsk                20 include/linux/tsacct_kern.h 				 struct taskstats *stats, struct task_struct *tsk)
tsk                26 include/linux/tsacct_kern.h extern void acct_update_integrals(struct task_struct *tsk);
tsk                27 include/linux/tsacct_kern.h extern void acct_account_cputime(struct task_struct *tsk);
tsk                28 include/linux/tsacct_kern.h extern void acct_clear_integrals(struct task_struct *tsk);
tsk                32 include/linux/tsacct_kern.h static inline void acct_update_integrals(struct task_struct *tsk)
tsk                34 include/linux/tsacct_kern.h static inline void acct_account_cputime(struct task_struct *tsk)
tsk                36 include/linux/tsacct_kern.h static inline void acct_clear_integrals(struct task_struct *tsk)
tsk               133 include/linux/uprobes.h extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
tsk                 8 include/linux/vmacache.h static inline void vmacache_flush(struct task_struct *tsk)
tsk                10 include/linux/vmacache.h 	memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
tsk                60 include/linux/vtime.h extern void vtime_account_system(struct task_struct *tsk);
tsk                61 include/linux/vtime.h extern void vtime_account_idle(struct task_struct *tsk);
tsk                66 include/linux/vtime.h static inline void vtime_account_system(struct task_struct *tsk) { }
tsk                70 include/linux/vtime.h extern void arch_vtime_task_switch(struct task_struct *tsk);
tsk                71 include/linux/vtime.h extern void vtime_user_enter(struct task_struct *tsk);
tsk                72 include/linux/vtime.h extern void vtime_user_exit(struct task_struct *tsk);
tsk                73 include/linux/vtime.h extern void vtime_guest_enter(struct task_struct *tsk);
tsk                74 include/linux/vtime.h extern void vtime_guest_exit(struct task_struct *tsk);
tsk                75 include/linux/vtime.h extern void vtime_init_idle(struct task_struct *tsk, int cpu);
tsk                77 include/linux/vtime.h static inline void vtime_user_enter(struct task_struct *tsk) { }
tsk                78 include/linux/vtime.h static inline void vtime_user_exit(struct task_struct *tsk) { }
tsk                79 include/linux/vtime.h static inline void vtime_guest_enter(struct task_struct *tsk) { }
tsk                80 include/linux/vtime.h static inline void vtime_guest_exit(struct task_struct *tsk) { }
tsk                81 include/linux/vtime.h static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
tsk                85 include/linux/vtime.h extern void vtime_account_irq_enter(struct task_struct *tsk);
tsk                86 include/linux/vtime.h static inline void vtime_account_irq_exit(struct task_struct *tsk)
tsk                89 include/linux/vtime.h 	vtime_account_system(tsk);
tsk                91 include/linux/vtime.h extern void vtime_flush(struct task_struct *tsk);
tsk                93 include/linux/vtime.h static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
tsk                94 include/linux/vtime.h static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
tsk                95 include/linux/vtime.h static inline void vtime_flush(struct task_struct *tsk) { }
tsk               100 include/linux/vtime.h extern void irqtime_account_irq(struct task_struct *tsk);
tsk               102 include/linux/vtime.h static inline void irqtime_account_irq(struct task_struct *tsk) { }
tsk               105 include/linux/vtime.h static inline void account_irq_enter_time(struct task_struct *tsk)
tsk               107 include/linux/vtime.h 	vtime_account_irq_enter(tsk);
tsk               108 include/linux/vtime.h 	irqtime_account_irq(tsk);
tsk               111 include/linux/vtime.h static inline void account_irq_exit_time(struct task_struct *tsk)
tsk               113 include/linux/vtime.h 	vtime_account_irq_exit(tsk);
tsk               114 include/linux/vtime.h 	irqtime_account_irq(tsk);
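
account_irq_enter_time()/account_irq_exit_time() at the bottom of vtime.h are pure composition: each calls the vtime hook and the irqtime hook in turn, and because every hook has an empty stub under the opposite config, the same two-liner is correct for any mix of virtual CPU accounting and IRQ-time accounting. Reconstructed from the lines above:

	static inline void account_irq_enter_time(struct task_struct *tsk)
	{
		vtime_account_irq_enter(tsk);	/* no-op without vtime accounting */
		irqtime_account_irq(tsk);	/* no-op without irqtime accounting */
	}

	static inline void account_irq_exit_time(struct task_struct *tsk)
	{
		vtime_account_irq_exit(tsk);
		irqtime_account_irq(tsk);
	}
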
tsk                46 include/linux/wait.h #define __WAITQUEUE_INITIALIZER(name, tsk) {					\
tsk                47 include/linux/wait.h 	.private	= tsk,							\
tsk                51 include/linux/wait.h #define DECLARE_WAITQUEUE(name, tsk)						\
tsk                52 include/linux/wait.h 	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
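
DECLARE_WAITQUEUE(name, tsk) builds an on-stack wait_queue_entry whose .private is the task, and it exists to support the classic open-coded wait loop. A hedged usage sketch; wq_head and condition stand in for the caller's wait_queue_head_t and wakeup predicate, neither of which appears in the excerpt:

	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&wq_head, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition)		/* re-checked after queueing: no lost wakeups */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&wq_head, &wait);
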
tsk               353 include/trace/events/sched.h 	TP_PROTO(struct task_struct *tsk, u64 delay),
tsk               355 include/trace/events/sched.h 	TP_ARGS(__perf_task(tsk), __perf_count(delay)),
tsk               364 include/trace/events/sched.h 		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
tsk               365 include/trace/events/sched.h 		__entry->pid	= tsk->pid;
tsk               379 include/trace/events/sched.h 	     TP_PROTO(struct task_struct *tsk, u64 delay),
tsk               380 include/trace/events/sched.h 	     TP_ARGS(tsk, delay));
tsk               387 include/trace/events/sched.h 	     TP_PROTO(struct task_struct *tsk, u64 delay),
tsk               388 include/trace/events/sched.h 	     TP_ARGS(tsk, delay));
tsk               395 include/trace/events/sched.h 	     TP_PROTO(struct task_struct *tsk, u64 delay),
tsk               396 include/trace/events/sched.h 	     TP_ARGS(tsk, delay));
tsk               402 include/trace/events/sched.h 	     TP_PROTO(struct task_struct *tsk, u64 delay),
tsk               403 include/trace/events/sched.h 	     TP_ARGS(tsk, delay));
tsk               411 include/trace/events/sched.h 	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
tsk               413 include/trace/events/sched.h 	TP_ARGS(tsk, __perf_count(runtime), vruntime),
tsk               423 include/trace/events/sched.h 		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
tsk               424 include/trace/events/sched.h 		__entry->pid		= tsk->pid;
tsk               436 include/trace/events/sched.h 	     TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
tsk               437 include/trace/events/sched.h 	     TP_ARGS(tsk, runtime, vruntime));
tsk               445 include/trace/events/sched.h 	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),
tsk               447 include/trace/events/sched.h 	TP_ARGS(tsk, pi_task),
tsk               457 include/trace/events/sched.h 		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
tsk               458 include/trace/events/sched.h 		__entry->pid		= tsk->pid;
tsk               459 include/trace/events/sched.h 		__entry->oldprio	= tsk->prio;
tsk               461 include/trace/events/sched.h 				min(tsk->normal_prio, pi_task->prio) :
tsk               462 include/trace/events/sched.h 				tsk->normal_prio;
tsk               473 include/trace/events/sched.h 	TP_PROTO(struct task_struct *tsk),
tsk               474 include/trace/events/sched.h 	TP_ARGS(tsk),
tsk               482 include/trace/events/sched.h 		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
tsk               483 include/trace/events/sched.h 		__entry->pid = tsk->pid;
tsk               492 include/trace/events/sched.h 	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
tsk               494 include/trace/events/sched.h 	TP_ARGS(tsk, src_cpu, dst_cpu),
tsk               507 include/trace/events/sched.h 		__entry->pid		= task_pid_nr(tsk);
tsk               508 include/trace/events/sched.h 		__entry->tgid		= task_tgid_nr(tsk);
tsk               509 include/trace/events/sched.h 		__entry->ngid		= task_numa_group_id(tsk);
tsk               527 include/trace/events/sched.h 	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
tsk               529 include/trace/events/sched.h 	TP_ARGS(tsk, src_cpu, dst_cpu)
tsk               533 include/trace/events/sched.h 	TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
tsk               535 include/trace/events/sched.h 	TP_ARGS(tsk, src_cpu, dst_cpu)
tsk               112 include/video/uvesafb.h static int uvesafb_exec(struct uvesafb_ktask *tsk);
tsk               408 init/main.c    	struct task_struct *tsk;
tsk               424 init/main.c    	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
tsk               425 init/main.c    	set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
tsk                79 ipc/msg.c      	struct task_struct	*tsk;
tsk               185 ipc/msg.c      	mss->tsk = current;
tsk               213 ipc/msg.c      		else if (stop_tsk == mss->tsk)
tsk               224 ipc/msg.c      				stop_tsk = mss->tsk;
tsk               230 ipc/msg.c      		wake_q_add(wake_q, mss->tsk);
tsk              2269 ipc/sem.c      int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
tsk              2279 ipc/sem.c      		tsk->sysvsem.undo_list = undo_list;
tsk              2281 ipc/sem.c      		tsk->sysvsem.undo_list = NULL;
tsk              2298 ipc/sem.c      void exit_sem(struct task_struct *tsk)
tsk              2302 ipc/sem.c      	ulp = tsk->sysvsem.undo_list;
tsk              2305 ipc/sem.c      	tsk->sysvsem.undo_list = NULL;
tsk              2343 ipc/sem.c      		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
tsk               960 kernel/audit.c 	struct task_struct *tsk;
tsk               975 kernel/audit.c 	tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
tsk               976 kernel/audit.c 	if (!IS_ERR(tsk))
tsk               273 kernel/audit.h extern int audit_exe_compare(struct task_struct *tsk,
tsk               290 kernel/audit.h extern void audit_filter_inodes(struct task_struct *tsk,
tsk               541 kernel/audit_watch.c int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
tsk               547 kernel/audit_watch.c 	exe_file = get_task_exe_file(tsk);
tsk              1166 kernel/auditfilter.c 	struct task_struct *tsk;
tsk              1187 kernel/auditfilter.c 	tsk = kthread_run(audit_send_list, dest, "audit_send_list");
tsk              1188 kernel/auditfilter.c 	if (IS_ERR(tsk)) {
tsk              1191 kernel/auditfilter.c 		err = PTR_ERR(tsk);
tsk               354 kernel/auditsc.c static int audit_field_compare(struct task_struct *tsk,
tsk               371 kernel/auditsc.c 		return audit_compare_uid(audit_get_loginuid(tsk), name, f, ctx);
tsk               383 kernel/auditsc.c 					    audit_get_loginuid(tsk));
tsk               392 kernel/auditsc.c 		return audit_uid_comparator(audit_get_loginuid(tsk), f->op,
tsk               395 kernel/auditsc.c 		return audit_uid_comparator(audit_get_loginuid(tsk), f->op,
tsk               398 kernel/auditsc.c 		return audit_uid_comparator(audit_get_loginuid(tsk), f->op,
tsk               438 kernel/auditsc.c static int audit_filter_rules(struct task_struct *tsk,
tsk               450 kernel/auditsc.c 	cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation);
tsk               460 kernel/auditsc.c 			pid = task_tgid_nr(tsk);
tsk               466 kernel/auditsc.c 					ctx->ppid = task_ppid_nr(tsk);
tsk               471 kernel/auditsc.c 			result = audit_exe_compare(tsk, rule->exe);
tsk               514 kernel/auditsc.c 			sessionid = audit_get_sessionid(tsk);
tsk               518 kernel/auditsc.c 			result = audit_comparator(tsk->personality, f->op, f->val);
tsk               620 kernel/auditsc.c 			result = audit_uid_comparator(audit_get_loginuid(tsk),
tsk               624 kernel/auditsc.c 			result = audit_comparator(audit_loginuid_set(tsk), f->op, f->val);
tsk               643 kernel/auditsc.c 					security_task_getsecid(tsk, &sid);
tsk               709 kernel/auditsc.c 			result = audit_field_compare(tsk, cred, f, ctx, name);
tsk               740 kernel/auditsc.c static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
tsk               747 kernel/auditsc.c 		if (audit_filter_rules(tsk, &e->rule, NULL, NULL,
tsk               780 kernel/auditsc.c static enum audit_state audit_filter_syscall(struct task_struct *tsk,
tsk               787 kernel/auditsc.c 	if (auditd_test_task(tsk))
tsk               793 kernel/auditsc.c 		    audit_filter_rules(tsk, &e->rule, ctx, NULL,
tsk               808 kernel/auditsc.c static int audit_filter_inode_name(struct task_struct *tsk,
tsk               818 kernel/auditsc.c 		    audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) {
tsk               831 kernel/auditsc.c void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
tsk               835 kernel/auditsc.c 	if (auditd_test_task(tsk))
tsk               841 kernel/auditsc.c 		if (audit_filter_inode_name(tsk, n, ctx))
tsk               915 kernel/auditsc.c int audit_alloc(struct task_struct *tsk)
tsk               924 kernel/auditsc.c 	state = audit_filter_task(tsk, &key);
tsk               926 kernel/auditsc.c 		clear_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
tsk               937 kernel/auditsc.c 	audit_set_context(tsk, context);
tsk               938 kernel/auditsc.c 	set_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
tsk              1582 kernel/auditsc.c void __audit_free(struct task_struct *tsk)
tsk              1584 kernel/auditsc.c 	struct audit_context *context = tsk->audit_context;
tsk              1597 kernel/auditsc.c 	if (tsk == current && !context->dummy && context->in_syscall) {
tsk              1601 kernel/auditsc.c 		audit_filter_syscall(tsk, context,
tsk              1603 kernel/auditsc.c 		audit_filter_inodes(tsk, context);
tsk              1608 kernel/auditsc.c 	audit_set_context(tsk, NULL);
tsk               518 kernel/capability.c bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns)
tsk               524 kernel/capability.c 	cred = rcu_dereference(tsk->ptracer_cred);
tsk                59 kernel/cgroup/cgroup-v1.c int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
tsk                76 kernel/cgroup/cgroup-v1.c 		retval = cgroup_attach_task(from_cgrp, tsk, false);
tsk               337 kernel/cgroup/cgroup-v1.c 	struct task_struct *tsk;
tsk               354 kernel/cgroup/cgroup-v1.c 	while ((tsk = css_task_iter_next(&it))) {
tsk               359 kernel/cgroup/cgroup-v1.c 			pid = task_tgid_vnr(tsk);
tsk               361 kernel/cgroup/cgroup-v1.c 			pid = task_pid_vnr(tsk);
tsk               695 kernel/cgroup/cgroup-v1.c 	struct task_struct *tsk;
tsk               719 kernel/cgroup/cgroup-v1.c 	while ((tsk = css_task_iter_next(&it))) {
tsk               720 kernel/cgroup/cgroup-v1.c 		switch (tsk->state) {
tsk               734 kernel/cgroup/cgroup-v1.c 			if (delayacct_is_task_waiting_on_io(tsk))
tsk              2831 kernel/cgroup/cgroup.c 	struct task_struct *tsk;
tsk              2841 kernel/cgroup/cgroup.c 		tsk = find_task_by_vpid(pid);
tsk              2842 kernel/cgroup/cgroup.c 		if (!tsk) {
tsk              2843 kernel/cgroup/cgroup.c 			tsk = ERR_PTR(-ESRCH);
tsk              2847 kernel/cgroup/cgroup.c 		tsk = current;
tsk              2851 kernel/cgroup/cgroup.c 		tsk = tsk->group_leader;
tsk              2859 kernel/cgroup/cgroup.c 	if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
tsk              2860 kernel/cgroup/cgroup.c 		tsk = ERR_PTR(-EINVAL);
tsk              2864 kernel/cgroup/cgroup.c 	get_task_struct(tsk);
tsk              2871 kernel/cgroup/cgroup.c 	return tsk;
tsk              5871 kernel/cgroup/cgroup.c 		     struct pid *pid, struct task_struct *tsk)
tsk              5904 kernel/cgroup/cgroup.c 		cgrp = task_cgroup_from_root(tsk, root);
tsk              5915 kernel/cgroup/cgroup.c 		if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
tsk              6104 kernel/cgroup/cgroup.c void cgroup_exit(struct task_struct *tsk)
tsk              6114 kernel/cgroup/cgroup.c 	cset = task_css_set(tsk);
tsk              6116 kernel/cgroup/cgroup.c 	if (!list_empty(&tsk->cg_list)) {
tsk              6118 kernel/cgroup/cgroup.c 		css_set_move_task(tsk, cset, NULL, false);
tsk              6119 kernel/cgroup/cgroup.c 		list_add_tail(&tsk->cg_list, &cset->dying_tasks);
tsk              6122 kernel/cgroup/cgroup.c 		WARN_ON_ONCE(cgroup_task_frozen(tsk));
tsk              6123 kernel/cgroup/cgroup.c 		if (unlikely(cgroup_task_freeze(tsk)))
tsk              6124 kernel/cgroup/cgroup.c 			cgroup_update_frozen(task_dfl_cgroup(tsk));
tsk              6133 kernel/cgroup/cgroup.c 		ss->exit(tsk);
tsk               423 kernel/cgroup/cpuset.c 					struct task_struct *tsk)
tsk               426 kernel/cgroup/cpuset.c 		task_set_spread_page(tsk);
tsk               428 kernel/cgroup/cpuset.c 		task_clear_spread_page(tsk);
tsk               431 kernel/cgroup/cpuset.c 		task_set_spread_slab(tsk);
tsk               433 kernel/cgroup/cpuset.c 		task_clear_spread_slab(tsk);
tsk              1617 kernel/cgroup/cpuset.c static void cpuset_change_task_nodemask(struct task_struct *tsk,
tsk              1620 kernel/cgroup/cpuset.c 	task_lock(tsk);
tsk              1623 kernel/cgroup/cpuset.c 	write_seqcount_begin(&tsk->mems_allowed_seq);
tsk              1625 kernel/cgroup/cpuset.c 	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
tsk              1626 kernel/cgroup/cpuset.c 	mpol_rebind_task(tsk, newmems);
tsk              1627 kernel/cgroup/cpuset.c 	tsk->mems_allowed = *newmems;
tsk              1629 kernel/cgroup/cpuset.c 	write_seqcount_end(&tsk->mems_allowed_seq);
tsk              1632 kernel/cgroup/cpuset.c 	task_unlock(tsk);
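
cpuset_change_task_nodemask() above is a seqcount write section: the new nodemask is published between write_seqcount_begin() and write_seqcount_end() so readers can snapshot tsk->mems_allowed without taking task_lock(). The matching reader side, as a sketch of the usual begin/retry idiom (the page-allocation paths wrap this loop in helpers not shown in the excerpt):

	unsigned int seq;
	nodemask_t mems;

	do {
		seq = read_seqcount_begin(&tsk->mems_allowed_seq);
		mems = tsk->mems_allowed;	/* consistent snapshot */
	} while (read_seqcount_retry(&tsk->mems_allowed_seq, seq));
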
tsk              3275 kernel/cgroup/cpuset.c void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
tsk              3281 kernel/cgroup/cpuset.c 	guarantee_online_cpus(task_cs(tsk), pmask);
tsk              3298 kernel/cgroup/cpuset.c void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
tsk              3301 kernel/cgroup/cpuset.c 	do_set_cpus_allowed(tsk, is_in_v2_mode() ?
tsk              3302 kernel/cgroup/cpuset.c 		task_cs(tsk)->cpus_allowed : cpu_possible_mask);
tsk              3339 kernel/cgroup/cpuset.c nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
tsk              3346 kernel/cgroup/cpuset.c 	guarantee_online_mems(task_cs(tsk), &mask);
tsk              3585 kernel/cgroup/cpuset.c 		     struct pid *pid, struct task_struct *tsk)
tsk              3596 kernel/cgroup/cpuset.c 	css = task_get_css(tsk, cpuset_cgrp_id);
tsk               157 kernel/cred.c  void exit_creds(struct task_struct *tsk)
tsk               161 kernel/cred.c  	kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
tsk               162 kernel/cred.c  	       atomic_read(&tsk->cred->usage),
tsk               163 kernel/cred.c  	       read_cred_subscribers(tsk->cred));
tsk               165 kernel/cred.c  	cred = (struct cred *) tsk->real_cred;
tsk               166 kernel/cred.c  	tsk->real_cred = NULL;
tsk               171 kernel/cred.c  	cred = (struct cred *) tsk->cred;
tsk               172 kernel/cred.c  	tsk->cred = NULL;
tsk               178 kernel/cred.c  	key_put(tsk->cached_requested_key);
tsk               179 kernel/cred.c  	tsk->cached_requested_key = NULL;
tsk               802 kernel/cred.c  			       const struct task_struct *tsk)
tsk               807 kernel/cred.c  	       cred == tsk->real_cred ? "[real]" : "",
tsk               808 kernel/cred.c  	       cred == tsk->cred ? "[eff]" : "");
tsk               850 kernel/cred.c  void __validate_process_creds(struct task_struct *tsk,
tsk               853 kernel/cred.c  	if (tsk->cred == tsk->real_cred) {
tsk               854 kernel/cred.c  		if (unlikely(read_cred_subscribers(tsk->cred) < 2 ||
tsk               855 kernel/cred.c  			     creds_are_invalid(tsk->cred)))
tsk               858 kernel/cred.c  		if (unlikely(read_cred_subscribers(tsk->real_cred) < 1 ||
tsk               859 kernel/cred.c  			     read_cred_subscribers(tsk->cred) < 1 ||
tsk               860 kernel/cred.c  			     creds_are_invalid(tsk->real_cred) ||
tsk               861 kernel/cred.c  			     creds_are_invalid(tsk->cred)))
tsk               870 kernel/cred.c  	dump_invalid_creds(tsk->real_cred, "Real", tsk);
tsk               871 kernel/cred.c  	if (tsk->cred != tsk->real_cred)
tsk               872 kernel/cred.c  		dump_invalid_creds(tsk->cred, "Effective", tsk);
tsk               882 kernel/cred.c  void validate_creds_for_do_exit(struct task_struct *tsk)
tsk               885 kernel/cred.c  	       tsk->real_cred, tsk->cred,
tsk               886 kernel/cred.c  	       atomic_read(&tsk->cred->usage),
tsk               887 kernel/cred.c  	       read_cred_subscribers(tsk->cred));
tsk               889 kernel/cred.c  	__validate_process_creds(tsk, __FILE__, __LINE__);
tsk                34 kernel/delayacct.c void __delayacct_tsk_init(struct task_struct *tsk)
tsk                36 kernel/delayacct.c 	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
tsk                37 kernel/delayacct.c 	if (tsk->delays)
tsk                38 kernel/delayacct.c 		raw_spin_lock_init(&tsk->delays->lock);
tsk                85 kernel/delayacct.c int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
tsk                92 kernel/delayacct.c 	task_cputime(tsk, &utime, &stime);
tsk                97 kernel/delayacct.c 	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
tsk               107 kernel/delayacct.c 	t1 = tsk->sched_info.pcount;
tsk               108 kernel/delayacct.c 	t2 = tsk->sched_info.run_delay;
tsk               109 kernel/delayacct.c 	t3 = tsk->se.sum_exec_runtime;
tsk               122 kernel/delayacct.c 	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
tsk               123 kernel/delayacct.c 	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
tsk               125 kernel/delayacct.c 	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
tsk               127 kernel/delayacct.c 	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
tsk               129 kernel/delayacct.c 	tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
tsk               131 kernel/delayacct.c 	d->blkio_count += tsk->delays->blkio_count;
tsk               132 kernel/delayacct.c 	d->swapin_count += tsk->delays->swapin_count;
tsk               133 kernel/delayacct.c 	d->freepages_count += tsk->delays->freepages_count;
tsk               134 kernel/delayacct.c 	d->thrashing_count += tsk->delays->thrashing_count;
tsk               135 kernel/delayacct.c 	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
tsk               140 kernel/delayacct.c __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
tsk               145 kernel/delayacct.c 	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
tsk               146 kernel/delayacct.c 	ret = nsec_to_clock_t(tsk->delays->blkio_delay +
tsk               147 kernel/delayacct.c 				tsk->delays->swapin_delay);
tsk               148 kernel/delayacct.c 	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
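
Both delayacct readers above follow one rule: every fold of the per-task delay counters happens under tsk->delays->lock with interrupts saved, since the same counters are updated from scheduling paths. __delayacct_blkio_ticks() reconstructed in full from the lines above; only the local declarations are filled in:

	__u64 __delayacct_blkio_ticks(struct task_struct *tsk)
	{
		unsigned long flags;
		__u64 ret;

		raw_spin_lock_irqsave(&tsk->delays->lock, flags);
		ret = nsec_to_clock_t(tsk->delays->blkio_delay +
				      tsk->delays->swapin_delay);
		raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);

		return ret;
	}
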
tsk               107 kernel/events/hw_breakpoint.c 	struct task_struct *tsk = bp->hw.target;
tsk               112 kernel/events/hw_breakpoint.c 		if (iter->hw.target == tsk &&
tsk               444 kernel/events/hw_breakpoint.c 			    struct task_struct *tsk)
tsk               446 kernel/events/hw_breakpoint.c 	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
tsk              1641 kernel/events/uprobes.c static void xol_free_insn_slot(struct task_struct *tsk)
tsk              1647 kernel/events/uprobes.c 	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
tsk              1650 kernel/events/uprobes.c 	slot_addr = tsk->utask->xol_vaddr;
tsk              1654 kernel/events/uprobes.c 	area = tsk->mm->uprobes_state.xol_area;
tsk              1671 kernel/events/uprobes.c 		tsk->utask->xol_vaddr = 0;
tsk                92 kernel/exit.c  static void __exit_signal(struct task_struct *tsk)
tsk                94 kernel/exit.c  	struct signal_struct *sig = tsk->signal;
tsk                95 kernel/exit.c  	bool group_dead = thread_group_leader(tsk);
tsk               100 kernel/exit.c  	sighand = rcu_dereference_check(tsk->sighand,
tsk               105 kernel/exit.c  	posix_cpu_timers_exit(tsk);
tsk               107 kernel/exit.c  		posix_cpu_timers_exit_group(tsk);
tsk               114 kernel/exit.c  		if (unlikely(has_group_leader_pid(tsk)))
tsk               115 kernel/exit.c  			posix_cpu_timers_exit_group(tsk);
tsk               130 kernel/exit.c  		if (tsk == sig->curr_target)
tsk               131 kernel/exit.c  			sig->curr_target = next_thread(tsk);
tsk               134 kernel/exit.c  	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
tsk               143 kernel/exit.c  	task_cputime(tsk, &utime, &stime);
tsk               147 kernel/exit.c  	sig->gtime += task_gtime(tsk);
tsk               148 kernel/exit.c  	sig->min_flt += tsk->min_flt;
tsk               149 kernel/exit.c  	sig->maj_flt += tsk->maj_flt;
tsk               150 kernel/exit.c  	sig->nvcsw += tsk->nvcsw;
tsk               151 kernel/exit.c  	sig->nivcsw += tsk->nivcsw;
tsk               152 kernel/exit.c  	sig->inblock += task_io_get_inblock(tsk);
tsk               153 kernel/exit.c  	sig->oublock += task_io_get_oublock(tsk);
tsk               154 kernel/exit.c  	task_io_accounting_add(&sig->ioac, &tsk->ioac);
tsk               155 kernel/exit.c  	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
tsk               157 kernel/exit.c  	__unhash_process(tsk, group_dead);
tsk               164 kernel/exit.c  	flush_sigqueue(&tsk->pending);
tsk               165 kernel/exit.c  	tsk->sighand = NULL;
tsk               169 kernel/exit.c  	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
tsk               178 kernel/exit.c  	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
tsk               180 kernel/exit.c  	perf_event_delayed_put(tsk);
tsk               181 kernel/exit.c  	trace_sched_process_free(tsk);
tsk               182 kernel/exit.c  	put_task_struct(tsk);
tsk               318 kernel/exit.c  kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
tsk               320 kernel/exit.c  	struct pid *pgrp = task_pgrp(tsk);
tsk               321 kernel/exit.c  	struct task_struct *ignored_task = tsk;
tsk               327 kernel/exit.c  		parent = tsk->real_parent;
tsk               335 kernel/exit.c  	    task_session(parent) == task_session(tsk) &&
tsk               645 kernel/exit.c  static void exit_notify(struct task_struct *tsk, int group_dead)
tsk               652 kernel/exit.c  	forget_original_parent(tsk, &dead);
tsk               655 kernel/exit.c  		kill_orphaned_pgrp(tsk->group_leader, NULL);
tsk               657 kernel/exit.c  	tsk->exit_state = EXIT_ZOMBIE;
tsk               658 kernel/exit.c  	if (unlikely(tsk->ptrace)) {
tsk               659 kernel/exit.c  		int sig = thread_group_leader(tsk) &&
tsk               660 kernel/exit.c  				thread_group_empty(tsk) &&
tsk               661 kernel/exit.c  				!ptrace_reparented(tsk) ?
tsk               662 kernel/exit.c  			tsk->exit_signal : SIGCHLD;
tsk               663 kernel/exit.c  		autoreap = do_notify_parent(tsk, sig);
tsk               664 kernel/exit.c  	} else if (thread_group_leader(tsk)) {
tsk               665 kernel/exit.c  		autoreap = thread_group_empty(tsk) &&
tsk               666 kernel/exit.c  			do_notify_parent(tsk, tsk->exit_signal);
tsk               672 kernel/exit.c  		tsk->exit_state = EXIT_DEAD;
tsk               673 kernel/exit.c  		list_add(&tsk->ptrace_entry, &dead);
tsk               677 kernel/exit.c  	if (unlikely(tsk->signal->notify_count < 0))
tsk               678 kernel/exit.c  		wake_up_process(tsk->signal->group_exit_task);
tsk               713 kernel/exit.c  	struct task_struct *tsk = current;
tsk               716 kernel/exit.c  	profile_task_exit(tsk);
tsk               717 kernel/exit.c  	kcov_task_exit(tsk);
tsk               719 kernel/exit.c  	WARN_ON(blk_needs_flush_plug(tsk));
tsk               723 kernel/exit.c  	if (unlikely(!tsk->pid))
tsk               737 kernel/exit.c  	validate_creds_for_do_exit(tsk);
tsk               743 kernel/exit.c  	if (unlikely(tsk->flags & PF_EXITING)) {
tsk               745 kernel/exit.c  		futex_exit_recursive(tsk);
tsk               750 kernel/exit.c  	exit_signals(tsk);  /* sets PF_EXITING */
tsk               760 kernel/exit.c  	if (tsk->mm)
tsk               761 kernel/exit.c  		sync_mm_rss(tsk->mm);
tsk               762 kernel/exit.c  	acct_update_integrals(tsk);
tsk               763 kernel/exit.c  	group_dead = atomic_dec_and_test(&tsk->signal->live);
tsk               769 kernel/exit.c  		if (unlikely(is_global_init(tsk)))
tsk               771 kernel/exit.c  				tsk->signal->group_exit_code ?: (int)code);
tsk               774 kernel/exit.c  		hrtimer_cancel(&tsk->signal->real_timer);
tsk               775 kernel/exit.c  		exit_itimers(tsk->signal);
tsk               777 kernel/exit.c  		if (tsk->mm)
tsk               778 kernel/exit.c  			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
tsk               783 kernel/exit.c  	audit_free(tsk);
tsk               785 kernel/exit.c  	tsk->exit_code = code;
tsk               786 kernel/exit.c  	taskstats_exit(tsk, group_dead);
tsk               792 kernel/exit.c  	trace_sched_process_exit(tsk);
tsk               794 kernel/exit.c  	exit_sem(tsk);
tsk               795 kernel/exit.c  	exit_shm(tsk);
tsk               796 kernel/exit.c  	exit_files(tsk);
tsk               797 kernel/exit.c  	exit_fs(tsk);
tsk               800 kernel/exit.c  	exit_task_namespaces(tsk);
tsk               801 kernel/exit.c  	exit_task_work(tsk);
tsk               802 kernel/exit.c  	exit_thread(tsk);
tsk               803 kernel/exit.c  	exit_umh(tsk);
tsk               811 kernel/exit.c  	perf_event_exit_task(tsk);
tsk               813 kernel/exit.c  	sched_autogroup_exit_task(tsk);
tsk               814 kernel/exit.c  	cgroup_exit(tsk);
tsk               819 kernel/exit.c  	flush_ptrace_hw_breakpoint(tsk);
tsk               822 kernel/exit.c  	exit_notify(tsk, group_dead);
tsk               823 kernel/exit.c  	proc_exit_connector(tsk);
tsk               824 kernel/exit.c  	mpol_put_task_policy(tsk);
tsk               834 kernel/exit.c  	if (tsk->io_context)
tsk               835 kernel/exit.c  		exit_io_context(tsk);
tsk               837 kernel/exit.c  	if (tsk->splice_pipe)
tsk               838 kernel/exit.c  		free_pipe_info(tsk->splice_pipe);
tsk               840 kernel/exit.c  	if (tsk->task_frag.page)
tsk               841 kernel/exit.c  		put_page(tsk->task_frag.page);
tsk               843 kernel/exit.c  	validate_creds_for_do_exit(tsk);
tsk               847 kernel/exit.c  	if (tsk->nr_dirtied)
tsk               848 kernel/exit.c  		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
tsk               852 kernel/exit.c  	lockdep_free_task(tsk);
tsk              1369 kernel/exit.c  static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
tsk              1373 kernel/exit.c  	list_for_each_entry(p, &tsk->children, sibling) {
tsk              1383 kernel/exit.c  static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
tsk              1387 kernel/exit.c  	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
tsk              1421 kernel/exit.c  	struct task_struct *tsk;
tsk              1443 kernel/exit.c  	tsk = current;
tsk              1445 kernel/exit.c  		retval = do_wait_thread(wo, tsk);
tsk              1449 kernel/exit.c  		retval = ptrace_do_wait(wo, tsk);
tsk              1455 kernel/exit.c  	} while_each_thread(current, tsk);
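
One pattern worth isolating from the exit path above: the final drop of the task_struct is deferred through an RCU callback, so code still dereferencing the task under rcu_read_lock() cannot see it freed underneath. Reconstructed around the container_of() line shown earlier in the exit.c excerpt; the function name delayed_put_task_struct and the call_rcu() caller are assumptions from the standard release path:

	static void delayed_put_task_struct(struct rcu_head *rhp)
	{
		struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

		perf_event_delayed_put(tsk);
		trace_sched_process_free(tsk);
		put_task_struct(tsk);	/* the actual free, one grace period later */
	}

	/* release side (assumed): call_rcu(&tsk->rcu, delayed_put_task_struct); */
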
tsk               160 kernel/fork.c  void __weak arch_release_task_struct(struct task_struct *tsk)
tsk               172 kernel/fork.c  static inline void free_task_struct(struct task_struct *tsk)
tsk               174 kernel/fork.c  	kmem_cache_free(task_struct_cachep, tsk);
tsk               213 kernel/fork.c  static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
tsk               230 kernel/fork.c  		tsk->stack_vm_area = s;
tsk               231 kernel/fork.c  		tsk->stack = s->addr;
tsk               252 kernel/fork.c  		tsk->stack_vm_area = find_vm_area(stack);
tsk               253 kernel/fork.c  		tsk->stack = stack;
tsk               261 kernel/fork.c  		tsk->stack = page_address(page);
tsk               262 kernel/fork.c  		return tsk->stack;
tsk               268 kernel/fork.c  static inline void free_thread_stack(struct task_struct *tsk)
tsk               271 kernel/fork.c  	struct vm_struct *vm = task_stack_vm_area(tsk);
tsk               286 kernel/fork.c  					NULL, tsk->stack_vm_area) != NULL)
tsk               292 kernel/fork.c  		vfree_atomic(tsk->stack);
tsk               297 kernel/fork.c  	__free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
tsk               302 kernel/fork.c  static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
tsk               307 kernel/fork.c  	tsk->stack = stack;
tsk               311 kernel/fork.c  static void free_thread_stack(struct task_struct *tsk)
tsk               313 kernel/fork.c  	kmem_cache_free(thread_stack_cache, tsk->stack);
tsk               370 kernel/fork.c  static void account_kernel_stack(struct task_struct *tsk, int account)
tsk               372 kernel/fork.c  	void *stack = task_stack_page(tsk);
tsk               373 kernel/fork.c  	struct vm_struct *vm = task_stack_vm_area(tsk);
tsk               402 kernel/fork.c  static int memcg_charge_kernel_stack(struct task_struct *tsk)
tsk               405 kernel/fork.c  	struct vm_struct *vm = task_stack_vm_area(tsk);
tsk               431 kernel/fork.c  static void release_task_stack(struct task_struct *tsk)
tsk               433 kernel/fork.c  	if (WARN_ON(tsk->state != TASK_DEAD))
tsk               436 kernel/fork.c  	account_kernel_stack(tsk, -1);
tsk               437 kernel/fork.c  	free_thread_stack(tsk);
tsk               438 kernel/fork.c  	tsk->stack = NULL;
tsk               440 kernel/fork.c  	tsk->stack_vm_area = NULL;
tsk               445 kernel/fork.c  void put_task_stack(struct task_struct *tsk)
tsk               447 kernel/fork.c  	if (refcount_dec_and_test(&tsk->stack_refcount))
tsk               448 kernel/fork.c  		release_task_stack(tsk);
tsk               452 kernel/fork.c  void free_task(struct task_struct *tsk)
tsk               459 kernel/fork.c  	release_task_stack(tsk);
tsk               465 kernel/fork.c  	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
tsk               467 kernel/fork.c  	rt_mutex_debug_task_free(tsk);
tsk               468 kernel/fork.c  	ftrace_graph_exit_task(tsk);
tsk               469 kernel/fork.c  	put_seccomp_filter(tsk);
tsk               470 kernel/fork.c  	arch_release_task_struct(tsk);
tsk               471 kernel/fork.c  	if (tsk->flags & PF_KTHREAD)
tsk               472 kernel/fork.c  		free_kthread_struct(tsk);
tsk               473 kernel/fork.c  	free_task_struct(tsk);
tsk               734 kernel/fork.c  void __put_task_struct(struct task_struct *tsk)
tsk               736 kernel/fork.c  	WARN_ON(!tsk->exit_state);
tsk               737 kernel/fork.c  	WARN_ON(refcount_read(&tsk->usage));
tsk               738 kernel/fork.c  	WARN_ON(tsk == current);
tsk               740 kernel/fork.c  	cgroup_free(tsk);
tsk               741 kernel/fork.c  	task_numa_free(tsk, true);
tsk               742 kernel/fork.c  	security_task_free(tsk);
tsk               743 kernel/fork.c  	exit_creds(tsk);
tsk               744 kernel/fork.c  	delayacct_tsk_free(tsk);
tsk               745 kernel/fork.c  	put_signal_struct(tsk->signal);
tsk               747 kernel/fork.c  	if (!profile_handoff_task(tsk))
tsk               748 kernel/fork.c  		free_task(tsk);
tsk               848 kernel/fork.c  void set_task_stack_end_magic(struct task_struct *tsk)
tsk               852 kernel/fork.c  	stackend = end_of_stack(tsk);
tsk               858 kernel/fork.c  	struct task_struct *tsk;
tsk               865 kernel/fork.c  	tsk = alloc_task_struct_node(node);
tsk               866 kernel/fork.c  	if (!tsk)
tsk               869 kernel/fork.c  	stack = alloc_thread_stack_node(tsk, node);
tsk               873 kernel/fork.c  	if (memcg_charge_kernel_stack(tsk))
tsk               876 kernel/fork.c  	stack_vm_area = task_stack_vm_area(tsk);
tsk               878 kernel/fork.c  	err = arch_dup_task_struct(tsk, orig);
tsk               885 kernel/fork.c  	tsk->stack = stack;
tsk               887 kernel/fork.c  	tsk->stack_vm_area = stack_vm_area;
tsk               890 kernel/fork.c  	refcount_set(&tsk->stack_refcount, 1);
tsk               903 kernel/fork.c  	tsk->seccomp.filter = NULL;
tsk               906 kernel/fork.c  	setup_thread_stack(tsk, orig);
tsk               907 kernel/fork.c  	clear_user_return_notifier(tsk);
tsk               908 kernel/fork.c  	clear_tsk_need_resched(tsk);
tsk               909 kernel/fork.c  	set_task_stack_end_magic(tsk);
tsk               912 kernel/fork.c  	tsk->stack_canary = get_random_canary();
tsk               915 kernel/fork.c  		tsk->cpus_ptr = &tsk->cpus_mask;
tsk               921 kernel/fork.c  	refcount_set(&tsk->rcu_users, 2);
tsk               923 kernel/fork.c  	refcount_set(&tsk->usage, 1);
tsk               925 kernel/fork.c  	tsk->btrace_seq = 0;
tsk               927 kernel/fork.c  	tsk->splice_pipe = NULL;
tsk               928 kernel/fork.c  	tsk->task_frag.page = NULL;
tsk               929 kernel/fork.c  	tsk->wake_q.next = NULL;
tsk               931 kernel/fork.c  	account_kernel_stack(tsk, 1);
tsk               933 kernel/fork.c  	kcov_task_init(tsk);
tsk               936 kernel/fork.c  	tsk->fail_nth = 0;
tsk               940 kernel/fork.c  	tsk->throttle_queue = NULL;
tsk               941 kernel/fork.c  	tsk->use_memdelay = 0;
tsk               945 kernel/fork.c  	tsk->active_memcg = NULL;
tsk               947 kernel/fork.c  	return tsk;
tsk               950 kernel/fork.c  	free_thread_stack(tsk);
tsk               952 kernel/fork.c  	free_task_struct(tsk);
tsk              1239 kernel/fork.c  static void complete_vfork_done(struct task_struct *tsk)
tsk              1243 kernel/fork.c  	task_lock(tsk);
tsk              1244 kernel/fork.c  	vfork = tsk->vfork_done;
tsk              1246 kernel/fork.c  		tsk->vfork_done = NULL;
tsk              1249 kernel/fork.c  	task_unlock(tsk);
tsk              1286 kernel/fork.c  static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
tsk              1288 kernel/fork.c  	uprobe_free_utask(tsk);
tsk              1291 kernel/fork.c  	deactivate_mm(tsk, mm);
tsk              1298 kernel/fork.c  	if (tsk->clear_child_tid) {
tsk              1299 kernel/fork.c  		if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
tsk              1305 kernel/fork.c  			put_user(0, tsk->clear_child_tid);
tsk              1306 kernel/fork.c  			do_futex(tsk->clear_child_tid, FUTEX_WAKE,
tsk              1309 kernel/fork.c  		tsk->clear_child_tid = NULL;
tsk              1316 kernel/fork.c  	if (tsk->vfork_done)
tsk              1317 kernel/fork.c  		complete_vfork_done(tsk);
tsk              1320 kernel/fork.c  void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
tsk              1322 kernel/fork.c  	futex_exit_release(tsk);
tsk              1323 kernel/fork.c  	mm_release(tsk, mm);
tsk              1326 kernel/fork.c  void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
tsk              1328 kernel/fork.c  	futex_exec_release(tsk);
tsk              1329 kernel/fork.c  	mm_release(tsk, mm);
tsk              1342 kernel/fork.c  static struct mm_struct *dup_mm(struct task_struct *tsk,
tsk              1354 kernel/fork.c  	if (!mm_init(mm, tsk, mm->user_ns))
tsk              1379 kernel/fork.c  static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
tsk              1384 kernel/fork.c  	tsk->min_flt = tsk->maj_flt = 0;
tsk              1385 kernel/fork.c  	tsk->nvcsw = tsk->nivcsw = 0;
tsk              1387 kernel/fork.c  	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
tsk              1388 kernel/fork.c  	tsk->last_switch_time = 0;
tsk              1391 kernel/fork.c  	tsk->mm = NULL;
tsk              1392 kernel/fork.c  	tsk->active_mm = NULL;
tsk              1404 kernel/fork.c  	vmacache_flush(tsk);
tsk              1413 kernel/fork.c  	mm = dup_mm(tsk, current->mm);
tsk              1418 kernel/fork.c  	tsk->mm = mm;
tsk              1419 kernel/fork.c  	tsk->active_mm = mm;
tsk              1426 kernel/fork.c  static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
tsk              1440 kernel/fork.c  	tsk->fs = copy_fs_struct(fs);
tsk              1441 kernel/fork.c  	if (!tsk->fs)
tsk              1446 kernel/fork.c  static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
tsk              1467 kernel/fork.c  	tsk->files = newf;
tsk              1473 kernel/fork.c  static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
tsk              1486 kernel/fork.c  		tsk->io_context = ioc;
tsk              1488 kernel/fork.c  		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
tsk              1499 kernel/fork.c  static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
tsk              1508 kernel/fork.c  	rcu_assign_pointer(tsk->sighand, sig);
tsk              1543 kernel/fork.c  static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
tsk              1551 kernel/fork.c  	tsk->signal = sig;
tsk              1560 kernel/fork.c  	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
tsk              1561 kernel/fork.c  	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
tsk              1564 kernel/fork.c  	sig->curr_target = tsk;
tsk              1739 kernel/fork.c  	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
tsk              1741 kernel/fork.c  	free_task(tsk);
tsk              1744 kernel/fork.c  static __always_inline void delayed_free_task(struct task_struct *tsk)
tsk              1747 kernel/fork.c  		call_rcu(&tsk->rcu, __delayed_free_task);
tsk              1749 kernel/fork.c  		free_task(tsk);
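
delayed_free_task() at the end of the fork.c excerpt is the error-path twin of the same RCU-deferral pattern: when a half-built task might still be reachable under RCU it is freed via call_rcu(), otherwise directly. Reconstructed shape; the IS_ENABLED(CONFIG_MEMCG) condition is an assumption, as the excerpt elides the branch predicate:

	static __always_inline void delayed_free_task(struct task_struct *tsk)
	{
		if (IS_ENABLED(CONFIG_MEMCG))	/* assumed predicate, elided above */
			call_rcu(&tsk->rcu, __delayed_free_task);
		else
			free_task(tsk);
	}
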
tsk              1227 kernel/futex.c 			    struct task_struct *tsk)
tsk              1235 kernel/futex.c 	if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)
tsk              3759 kernel/futex.c static void futex_cleanup(struct task_struct *tsk)
tsk              3761 kernel/futex.c 	if (unlikely(tsk->robust_list)) {
tsk              3762 kernel/futex.c 		exit_robust_list(tsk);
tsk              3763 kernel/futex.c 		tsk->robust_list = NULL;
tsk              3767 kernel/futex.c 	if (unlikely(tsk->compat_robust_list)) {
tsk              3768 kernel/futex.c 		compat_exit_robust_list(tsk);
tsk              3769 kernel/futex.c 		tsk->compat_robust_list = NULL;
tsk              3773 kernel/futex.c 	if (unlikely(!list_empty(&tsk->pi_state_list)))
tsk              3774 kernel/futex.c 		exit_pi_state_list(tsk);
tsk              3794 kernel/futex.c void futex_exit_recursive(struct task_struct *tsk)
tsk              3797 kernel/futex.c 	if (tsk->futex_state == FUTEX_STATE_EXITING)
tsk              3798 kernel/futex.c 		mutex_unlock(&tsk->futex_exit_mutex);
tsk              3799 kernel/futex.c 	tsk->futex_state = FUTEX_STATE_DEAD;
tsk              3802 kernel/futex.c static void futex_cleanup_begin(struct task_struct *tsk)
tsk              3810 kernel/futex.c 	mutex_lock(&tsk->futex_exit_mutex);
tsk              3823 kernel/futex.c 	raw_spin_lock_irq(&tsk->pi_lock);
tsk              3824 kernel/futex.c 	tsk->futex_state = FUTEX_STATE_EXITING;
tsk              3825 kernel/futex.c 	raw_spin_unlock_irq(&tsk->pi_lock);
tsk              3828 kernel/futex.c static void futex_cleanup_end(struct task_struct *tsk, int state)
tsk              3834 kernel/futex.c 	tsk->futex_state = state;
tsk              3839 kernel/futex.c 	mutex_unlock(&tsk->futex_exit_mutex);
tsk              3842 kernel/futex.c void futex_exec_release(struct task_struct *tsk)
tsk              3851 kernel/futex.c 	futex_cleanup_begin(tsk);
tsk              3852 kernel/futex.c 	futex_cleanup(tsk);
tsk              3857 kernel/futex.c 	futex_cleanup_end(tsk, FUTEX_STATE_OK);
tsk              3860 kernel/futex.c void futex_exit_release(struct task_struct *tsk)
tsk              3862 kernel/futex.c 	futex_cleanup_begin(tsk);
tsk              3863 kernel/futex.c 	futex_cleanup(tsk);
tsk              3864 kernel/futex.c 	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
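
futex_cleanup_begin()/futex_cleanup_end() bracket futex_cleanup() with a small state machine: take futex_exit_mutex, flip futex_state to FUTEX_STATE_EXITING under pi_lock, do the cleanup, then publish the final state and drop the mutex. The two entry points shown above differ only in that final state; reconstructed from the lines above:

	void futex_exec_release(struct task_struct *tsk)
	{
		futex_cleanup_begin(tsk);
		futex_cleanup(tsk);
		futex_cleanup_end(tsk, FUTEX_STATE_OK);	/* exec: task lives on */
	}

	void futex_exit_release(struct task_struct *tsk)
	{
		futex_cleanup_begin(tsk);
		futex_cleanup(tsk);
		futex_cleanup_end(tsk, FUTEX_STATE_DEAD);	/* exit: done with futexes */
	}
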
tsk              1027 kernel/irq/manage.c 	struct task_struct *tsk = current;
tsk              1034 kernel/irq/manage.c 	action = kthread_data(tsk);
tsk              1037 kernel/irq/manage.c 	       tsk->comm, tsk->pid, action->irq);
tsk              1196 kernel/kprobes.c void kretprobe_hash_lock(struct task_struct *tsk,
tsk              1200 kernel/kprobes.c 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
tsk              1218 kernel/kprobes.c void kretprobe_hash_unlock(struct task_struct *tsk,
tsk              1222 kernel/kprobes.c 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
tsk               261 kernel/kthread.c int tsk_fork_get_node(struct task_struct *tsk)
tsk               264 kernel/kthread.c 	if (tsk == kthreadd_task)
tsk               265 kernel/kthread.c 		return tsk->pref_node_fork;
tsk               570 kernel/kthread.c 	struct task_struct *tsk = current;
tsk               573 kernel/kthread.c 	set_task_comm(tsk, "kthreadd");
tsk               574 kernel/kthread.c 	ignore_signals(tsk);
tsk               575 kernel/kthread.c 	set_cpus_allowed_ptr(tsk, cpu_all_mask);
tsk                86 kernel/latencytop.c account_global_scheduler_latency(struct task_struct *tsk,
tsk                93 kernel/latencytop.c 	if (!tsk->mm)
tsk               151 kernel/latencytop.c __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
tsk               171 kernel/latencytop.c 	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);
tsk               175 kernel/latencytop.c 	account_global_scheduler_latency(tsk, &lat);
tsk               177 kernel/latencytop.c 	for (i = 0; i < tsk->latency_record_count; i++) {
tsk               181 kernel/latencytop.c 		mylat = &tsk->latency_record[i];
tsk               206 kernel/latencytop.c 	if (tsk->latency_record_count >= LT_SAVECOUNT)
tsk               210 kernel/latencytop.c 	i = tsk->latency_record_count++;
tsk               211 kernel/latencytop.c 	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
tsk               535 kernel/locking/rwsem.c 		struct task_struct *tsk;
tsk               537 kernel/locking/rwsem.c 		tsk = waiter->task;
tsk               538 kernel/locking/rwsem.c 		get_task_struct(tsk);
tsk               551 kernel/locking/rwsem.c 		wake_q_add_safe(wake_q, tsk);
tsk                61 kernel/nsproxy.c 	struct task_struct *tsk, struct user_namespace *user_ns,
tsk                71 kernel/nsproxy.c 	new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, user_ns, new_fs);
tsk                77 kernel/nsproxy.c 	new_nsp->uts_ns = copy_utsname(flags, user_ns, tsk->nsproxy->uts_ns);
tsk                83 kernel/nsproxy.c 	new_nsp->ipc_ns = copy_ipcs(flags, user_ns, tsk->nsproxy->ipc_ns);
tsk                90 kernel/nsproxy.c 		copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
tsk                97 kernel/nsproxy.c 					    tsk->nsproxy->cgroup_ns);
tsk               103 kernel/nsproxy.c 	new_nsp->net_ns = copy_net_ns(flags, user_ns, tsk->nsproxy->net_ns);
tsk               134 kernel/nsproxy.c int copy_namespaces(unsigned long flags, struct task_struct *tsk)
tsk               136 kernel/nsproxy.c 	struct nsproxy *old_ns = tsk->nsproxy;
tsk               137 kernel/nsproxy.c 	struct user_namespace *user_ns = task_cred_xxx(tsk, user_ns);
tsk               161 kernel/nsproxy.c 	new_ns = create_new_namespaces(flags, tsk, user_ns, tsk->fs);
tsk               165 kernel/nsproxy.c 	tsk->nsproxy = new_ns;
tsk               235 kernel/nsproxy.c 	struct task_struct *tsk = current;
tsk               250 kernel/nsproxy.c 	new_nsproxy = create_new_namespaces(0, tsk, current_user_ns(), tsk->fs);
tsk               261 kernel/nsproxy.c 	switch_task_namespaces(tsk, new_nsproxy);
tsk               263 kernel/nsproxy.c 	perf_event_namespaces(tsk);
tsk               441 kernel/pid.c   struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
tsk               443 kernel/pid.c   	return ns_of_pid(task_pid(tsk));
tsk                42 kernel/ptrace.c int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
tsk                48 kernel/ptrace.c 	mm = get_task_mm(tsk);
tsk                52 kernel/ptrace.c 	if (!tsk->ptrace ||
tsk                53 kernel/ptrace.c 	    (current != tsk->parent) ||
tsk                55 kernel/ptrace.c 	     !ptracer_capable(tsk, mm->user_ns))) {
tsk                60 kernel/ptrace.c 	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
tsk               593 kernel/ptrace.c int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
tsk               602 kernel/ptrace.c 		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);
tsk               619 kernel/ptrace.c int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
tsk               630 kernel/ptrace.c 		retval = ptrace_access_vm(tsk, dst, buf, this_len,
tsk              1287 kernel/ptrace.c int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
tsk              1293 kernel/ptrace.c 	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
tsk              1299 kernel/ptrace.c int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
tsk              1304 kernel/ptrace.c 	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
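
generic_ptrace_peekdata()/generic_ptrace_pokedata() are thin wrappers over ptrace_access_vm(): transfer exactly one word, treat a short copy as -EIO, and for the peek hand the word back through put_user(). A reconstruction of the peek side; the put_user() tail is an assumption from the standard implementation, as only the access call appears in the excerpt:

	int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
				    unsigned long data)
	{
		unsigned long tmp;
		int copied;

		copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
		if (copied != sizeof(tmp))
			return -EIO;	/* partial word: report failure, not garbage */
		return put_user(tmp, (unsigned long __user *)data);
	}
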
tsk              4102 kernel/sched/core.c static inline void sched_submit_work(struct task_struct *tsk)
tsk              4104 kernel/sched/core.c 	if (!tsk->state)
tsk              4114 kernel/sched/core.c 	if (tsk->flags & PF_WQ_WORKER) {
tsk              4116 kernel/sched/core.c 		wq_worker_sleeping(tsk);
tsk              4120 kernel/sched/core.c 	if (tsk_is_pi_blocked(tsk))
tsk              4127 kernel/sched/core.c 	if (blk_needs_flush_plug(tsk))
tsk              4128 kernel/sched/core.c 		blk_schedule_flush_plug(tsk);
tsk              4131 kernel/sched/core.c static void sched_update_worker(struct task_struct *tsk)
tsk              4133 kernel/sched/core.c 	if (tsk->flags & PF_WQ_WORKER)
tsk              4134 kernel/sched/core.c 		wq_worker_running(tsk);
tsk              4139 kernel/sched/core.c 	struct task_struct *tsk = current;
tsk              4141 kernel/sched/core.c 	sched_submit_work(tsk);
tsk              4147 kernel/sched/core.c 	sched_update_worker(tsk);
tsk              7004 kernel/sched/core.c static void sched_change_group(struct task_struct *tsk, int type)
tsk              7013 kernel/sched/core.c 	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
tsk              7015 kernel/sched/core.c 	tg = autogroup_task_group(tsk, tg);
tsk              7016 kernel/sched/core.c 	tsk->sched_task_group = tg;
tsk              7019 kernel/sched/core.c 	if (tsk->sched_class->task_change_group)
tsk              7020 kernel/sched/core.c 		tsk->sched_class->task_change_group(tsk, type);
tsk              7023 kernel/sched/core.c 		set_task_rq(tsk, task_cpu(tsk));
tsk              7033 kernel/sched/core.c void sched_move_task(struct task_struct *tsk)
tsk              7040 kernel/sched/core.c 	rq = task_rq_lock(tsk, &rf);
tsk              7043 kernel/sched/core.c 	running = task_current(rq, tsk);
tsk              7044 kernel/sched/core.c 	queued = task_on_rq_queued(tsk);
tsk              7047 kernel/sched/core.c 		dequeue_task(rq, tsk, queue_flags);
tsk              7049 kernel/sched/core.c 		put_prev_task(rq, tsk);
tsk              7051 kernel/sched/core.c 	sched_change_group(tsk, TASK_MOVE_GROUP);
tsk              7054 kernel/sched/core.c 		enqueue_task(rq, tsk, queue_flags);
tsk              7056 kernel/sched/core.c 		set_next_task(rq, tsk);
tsk              7065 kernel/sched/core.c 	task_rq_unlock(rq, tsk, &rf);
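
sched_move_task() above is the scheduler's standard change-a-task idiom: under task_rq_lock(), remember whether the task was queued and/or running, dequeue and put_prev as needed, apply the change (here sched_change_group()), then redo those steps in reverse order. Condensed from the lines above:

	rq = task_rq_lock(tsk, &rf);

	running = task_current(rq, tsk);
	queued  = task_on_rq_queued(tsk);

	if (queued)
		dequeue_task(rq, tsk, queue_flags);
	if (running)
		put_prev_task(rq, tsk);

	sched_change_group(tsk, TASK_MOVE_GROUP);	/* the actual change */

	if (queued)
		enqueue_task(rq, tsk, queue_flags);
	if (running)
		set_next_task(rq, tsk);

	task_rq_unlock(rq, tsk, &rf);
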
tsk                41 kernel/sched/cpuacct.c static inline struct cpuacct *task_ca(struct task_struct *tsk)
tsk                43 kernel/sched/cpuacct.c 	return css_ca(task_css(tsk, cpuacct_cgrp_id));
tsk               338 kernel/sched/cpuacct.c void cpuacct_charge(struct task_struct *tsk, u64 cputime)
tsk               342 kernel/sched/cpuacct.c 	struct pt_regs *regs = task_pt_regs(tsk);
tsk               349 kernel/sched/cpuacct.c 	for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
tsk               360 kernel/sched/cpuacct.c void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)
tsk               365 kernel/sched/cpuacct.c 	for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))
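
Note: both cpuacct_charge() and cpuacct_account_field() above walk the cgroup ancestry via parent_ca(), charging every ancestor. A minimal sketch of that hierarchical charge (types hypothetical; the root's parent is NULL):

struct acct_group {
	struct acct_group *parent;
	unsigned long long usage;   /* nanoseconds */
};

static void charge_hierarchy(struct acct_group *g, unsigned long long delta)
{
	for (; g; g = g->parent)    /* stop after charging the root */
		g->usage += delta;
}
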
tsk               293 kernel/sched/cputime.c void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
tsk               295 kernel/sched/cputime.c 	struct signal_struct *sig = tsk->signal;
tsk               309 kernel/sched/cputime.c 	if (same_thread_group(current, tsk))
tsk               322 kernel/sched/cputime.c 		for_each_thread(tsk, t) {
tsk               434 kernel/sched/cputime.c void vtime_account_irq_enter(struct task_struct *tsk)
tsk               436 kernel/sched/cputime.c 	if (!in_interrupt() && is_idle_task(tsk))
tsk               437 kernel/sched/cputime.c 		vtime_account_idle(tsk);
tsk               439 kernel/sched/cputime.c 		vtime_account_system(tsk);
tsk               714 kernel/sched/cputime.c static void __vtime_account_system(struct task_struct *tsk,
tsk               719 kernel/sched/cputime.c 		account_system_time(tsk, irq_count(), vtime->stime);
tsk               724 kernel/sched/cputime.c static void vtime_account_guest(struct task_struct *tsk,
tsk               729 kernel/sched/cputime.c 		account_guest_time(tsk, vtime->gtime);
tsk               734 kernel/sched/cputime.c void vtime_account_system(struct task_struct *tsk)
tsk               736 kernel/sched/cputime.c 	struct vtime *vtime = &tsk->vtime;
tsk               743 kernel/sched/cputime.c 	if (tsk->flags & PF_VCPU)
tsk               744 kernel/sched/cputime.c 		vtime_account_guest(tsk, vtime);
tsk               746 kernel/sched/cputime.c 		__vtime_account_system(tsk, vtime);
tsk               750 kernel/sched/cputime.c void vtime_user_enter(struct task_struct *tsk)
tsk               752 kernel/sched/cputime.c 	struct vtime *vtime = &tsk->vtime;
tsk               755 kernel/sched/cputime.c 	__vtime_account_system(tsk, vtime);
tsk               760 kernel/sched/cputime.c void vtime_user_exit(struct task_struct *tsk)
tsk               762 kernel/sched/cputime.c 	struct vtime *vtime = &tsk->vtime;
tsk               767 kernel/sched/cputime.c 		account_user_time(tsk, vtime->utime);
tsk               774 kernel/sched/cputime.c void vtime_guest_enter(struct task_struct *tsk)
tsk               776 kernel/sched/cputime.c 	struct vtime *vtime = &tsk->vtime;
tsk               785 kernel/sched/cputime.c 	__vtime_account_system(tsk, vtime);
tsk               786 kernel/sched/cputime.c 	tsk->flags |= PF_VCPU;
tsk               791 kernel/sched/cputime.c void vtime_guest_exit(struct task_struct *tsk)
tsk               793 kernel/sched/cputime.c 	struct vtime *vtime = &tsk->vtime;
tsk               796 kernel/sched/cputime.c 	vtime_account_guest(tsk, vtime);
tsk               797 kernel/sched/cputime.c 	tsk->flags &= ~PF_VCPU;
tsk               802 kernel/sched/cputime.c void vtime_account_idle(struct task_struct *tsk)
tsk               804 kernel/sched/cputime.c 	account_idle_time(get_vtime_delta(&tsk->vtime));
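
Note: thread_group_cputime() above sums utime/stime over every thread in the group; from userspace the same aggregate is visible through CLOCK_PROCESS_CPUTIME_ID (per-thread time is CLOCK_THREAD_CPUTIME_ID). A small example:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0)
		printf("process cpu time: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
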
tsk               926 kernel/sched/fair.c 	struct task_struct *tsk = NULL;
tsk               936 kernel/sched/fair.c 		tsk = task_of(se);
tsk               950 kernel/sched/fair.c 		if (tsk) {
tsk               951 kernel/sched/fair.c 			account_scheduler_latency(tsk, delta >> 10, 1);
tsk               952 kernel/sched/fair.c 			trace_sched_stat_sleep(tsk, delta);
tsk               967 kernel/sched/fair.c 		if (tsk) {
tsk               968 kernel/sched/fair.c 			if (tsk->in_iowait) {
tsk               971 kernel/sched/fair.c 				trace_sched_stat_iowait(tsk, delta);
tsk               974 kernel/sched/fair.c 			trace_sched_stat_blocked(tsk, delta);
tsk               983 kernel/sched/fair.c 						(void *)get_wchan(tsk),
tsk               986 kernel/sched/fair.c 			account_scheduler_latency(tsk, delta >> 10, 0);
tsk              1026 kernel/sched/fair.c 		struct task_struct *tsk = task_of(se);
tsk              1028 kernel/sched/fair.c 		if (tsk->state & TASK_INTERRUPTIBLE)
tsk              1031 kernel/sched/fair.c 		if (tsk->state & TASK_UNINTERRUPTIBLE)
tsk              2248 kernel/sched/fair.c 	struct task_struct *tsk;
tsk              2280 kernel/sched/fair.c 	tsk = READ_ONCE(cpu_rq(cpu)->curr);
tsk              2282 kernel/sched/fair.c 	if (!cpupid_match_pid(tsk, cpupid))
tsk              2285 kernel/sched/fair.c 	grp = rcu_dereference(tsk->numa_group);
tsk              2307 kernel/sched/fair.c 	if (tsk->mm == current->mm)
tsk              6295 kernel/sched/fair.c 		struct task_struct *tsk = cpu == dst_cpu ? p : NULL;
tsk              6314 kernel/sched/fair.c 					      FREQUENCY_UTIL, tsk);
tsk              1171 kernel/sched/rt.c 	struct task_struct *tsk;
tsk              1176 kernel/sched/rt.c 	tsk = rt_task_of(rt_se);
tsk              1178 kernel/sched/rt.c 	return (tsk->policy == SCHED_RR) ? 1 : 0;
tsk              2602 kernel/sched/rt.c int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
tsk              2605 kernel/sched/rt.c 	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
tsk               467 kernel/sched/sched.h extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
tsk               475 kernel/sched/sched.h extern void sched_move_task(struct task_struct *tsk);
tsk               558 kernel/seccomp.c void get_seccomp_filter(struct task_struct *tsk)
tsk               560 kernel/seccomp.c 	struct seccomp_filter *orig = tsk->seccomp.filter;
tsk               585 kernel/seccomp.c void put_seccomp_filter(struct task_struct *tsk)
tsk               587 kernel/seccomp.c 	__put_seccomp_filter(tsk->seccomp.filter);
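
Note: get_seccomp_filter()/put_seccomp_filter() above manage a refcounted filter chain where each filter pins its predecessor, so dropping the last reference to the newest filter can unwind the whole chain. A sketch of that discipline with hypothetical types (module context assumed):

#include <linux/refcount.h>
#include <linux/slab.h>

struct toy_filter {
	refcount_t usage;
	struct toy_filter *prev;    /* we hold one reference on prev */
};

static void toy_get_filter(struct toy_filter *f)
{
	if (f)
		refcount_inc(&f->usage);
}

static void toy_put_filter(struct toy_filter *f)
{
	/* freeing one filter releases the reference it held on prev,
	 * which may cascade down the chain */
	while (f && refcount_dec_and_test(&f->usage)) {
		struct toy_filter *prev = f->prev;

		kfree(f);
		f = prev;
	}
}
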
tsk               513 kernel/signal.c 	struct task_struct *tsk = current;
tsk               516 kernel/signal.c 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
tsk               517 kernel/signal.c 	__flush_itimer_signals(&tsk->pending);
tsk               518 kernel/signal.c 	__flush_itimer_signals(&tsk->signal->shared_pending);
tsk               519 kernel/signal.c 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
tsk               554 kernel/signal.c bool unhandled_signal(struct task_struct *tsk, int sig)
tsk               556 kernel/signal.c 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
tsk               557 kernel/signal.c 	if (is_global_init(tsk))
tsk               564 kernel/signal.c 	return !tsk->ptrace;
tsk               628 kernel/signal.c int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
tsk               636 kernel/signal.c 	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
tsk               638 kernel/signal.c 		signr = __dequeue_signal(&tsk->signal->shared_pending,
tsk               655 kernel/signal.c 			struct hrtimer *tmr = &tsk->signal->real_timer;
tsk               658 kernel/signal.c 			    tsk->signal->it_real_incr != 0) {
tsk               660 kernel/signal.c 						tsk->signal->it_real_incr);
tsk               694 kernel/signal.c 		spin_unlock(&tsk->sighand->siglock);
tsk               696 kernel/signal.c 		spin_lock(&tsk->sighand->siglock);
tsk               708 kernel/signal.c 	struct task_struct *tsk = current;
tsk               709 kernel/signal.c 	struct sigpending *pending = &tsk->pending;
tsk               715 kernel/signal.c 	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
tsk              1368 kernel/signal.c struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
tsk              1375 kernel/signal.c 		sighand = rcu_dereference(tsk->sighand);
tsk              1391 kernel/signal.c 		if (likely(sighand == tsk->sighand))
tsk              1910 kernel/signal.c bool do_notify_parent(struct task_struct *tsk, int sig)
tsk              1921 kernel/signal.c  	BUG_ON(task_is_stopped_or_traced(tsk));
tsk              1923 kernel/signal.c 	BUG_ON(!tsk->ptrace &&
tsk              1924 kernel/signal.c 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
tsk              1927 kernel/signal.c 	do_notify_pidfd(tsk);
tsk              1934 kernel/signal.c 		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
tsk              1953 kernel/signal.c 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
tsk              1954 kernel/signal.c 	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
tsk              1955 kernel/signal.c 				       task_uid(tsk));
tsk              1958 kernel/signal.c 	task_cputime(tsk, &utime, &stime);
tsk              1959 kernel/signal.c 	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
tsk              1960 kernel/signal.c 	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
tsk              1962 kernel/signal.c 	info.si_status = tsk->exit_code & 0x7f;
tsk              1963 kernel/signal.c 	if (tsk->exit_code & 0x80)
tsk              1965 kernel/signal.c 	else if (tsk->exit_code & 0x7f)
tsk              1969 kernel/signal.c 		info.si_status = tsk->exit_code >> 8;
tsk              1972 kernel/signal.c 	psig = tsk->parent->sighand;
tsk              1974 kernel/signal.c 	if (!tsk->ptrace && sig == SIGCHLD &&
tsk              2001 kernel/signal.c 		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
tsk              2002 kernel/signal.c 	__wake_up_parent(tsk, tsk->parent);
tsk              2021 kernel/signal.c static void do_notify_parent_cldstop(struct task_struct *tsk,
tsk              2031 kernel/signal.c 		parent = tsk->parent;
tsk              2033 kernel/signal.c 		tsk = tsk->group_leader;
tsk              2034 kernel/signal.c 		parent = tsk->real_parent;
tsk              2044 kernel/signal.c 	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
tsk              2045 kernel/signal.c 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
tsk              2048 kernel/signal.c 	task_cputime(tsk, &utime, &stime);
tsk              2058 kernel/signal.c  		info.si_status = tsk->signal->group_exit_code & 0x7f;
tsk              2061 kernel/signal.c  		info.si_status = tsk->exit_code & 0x7f;
tsk              2075 kernel/signal.c 	__wake_up_parent(tsk, parent);
tsk              2107 kernel/signal.c static bool sigkill_pending(struct task_struct *tsk)
tsk              2109 kernel/signal.c 	return sigismember(&tsk->pending.signal, SIGKILL) ||
tsk              2110 kernel/signal.c 	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
tsk              2792 kernel/signal.c static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
tsk              2797 kernel/signal.c 	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
tsk              2801 kernel/signal.c 	t = tsk;
tsk              2802 kernel/signal.c 	while_each_thread(tsk, t) {
tsk              2819 kernel/signal.c void exit_signals(struct task_struct *tsk)
tsk              2828 kernel/signal.c 	cgroup_threadgroup_change_begin(tsk);
tsk              2830 kernel/signal.c 	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
tsk              2831 kernel/signal.c 		tsk->flags |= PF_EXITING;
tsk              2832 kernel/signal.c 		cgroup_threadgroup_change_end(tsk);
tsk              2836 kernel/signal.c 	spin_lock_irq(&tsk->sighand->siglock);
tsk              2841 kernel/signal.c 	tsk->flags |= PF_EXITING;
tsk              2843 kernel/signal.c 	cgroup_threadgroup_change_end(tsk);
tsk              2845 kernel/signal.c 	if (!signal_pending(tsk))
tsk              2848 kernel/signal.c 	unblocked = tsk->blocked;
tsk              2850 kernel/signal.c 	retarget_shared_pending(tsk, &unblocked);
tsk              2852 kernel/signal.c 	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
tsk              2853 kernel/signal.c 	    task_participate_group_stop(tsk))
tsk              2856 kernel/signal.c 	spin_unlock_irq(&tsk->sighand->siglock);
tsk              2864 kernel/signal.c 		do_notify_parent_cldstop(tsk, false, group_stop);
tsk              2887 kernel/signal.c static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
tsk              2889 kernel/signal.c 	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
tsk              2893 kernel/signal.c 		retarget_shared_pending(tsk, &newblocked);
tsk              2895 kernel/signal.c 	tsk->blocked = *newset;
tsk              2914 kernel/signal.c 	struct task_struct *tsk = current;
tsk              2920 kernel/signal.c 	if (sigequalsets(&tsk->blocked, newset))
tsk              2923 kernel/signal.c 	spin_lock_irq(&tsk->sighand->siglock);
tsk              2924 kernel/signal.c 	__set_task_blocked(tsk, newset);
tsk              2925 kernel/signal.c 	spin_unlock_irq(&tsk->sighand->siglock);
tsk              2938 kernel/signal.c 	struct task_struct *tsk = current;
tsk              2943 kernel/signal.c 		*oldset = tsk->blocked;
tsk              2947 kernel/signal.c 		sigorsets(&newset, &tsk->blocked, set);
tsk              2950 kernel/signal.c 		sigandnsets(&newset, &tsk->blocked, set);
tsk              3445 kernel/signal.c 	struct task_struct *tsk = current;
tsk              3462 kernel/signal.c 	spin_lock_irq(&tsk->sighand->siglock);
tsk              3463 kernel/signal.c 	sig = dequeue_signal(tsk, &mask, info);
tsk              3471 kernel/signal.c 		tsk->real_blocked = tsk->blocked;
tsk              3472 kernel/signal.c 		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
tsk              3474 kernel/signal.c 		spin_unlock_irq(&tsk->sighand->siglock);
tsk              3477 kernel/signal.c 		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
tsk              3479 kernel/signal.c 		spin_lock_irq(&tsk->sighand->siglock);
tsk              3480 kernel/signal.c 		__set_task_blocked(tsk, &tsk->real_blocked);
tsk              3481 kernel/signal.c 		sigemptyset(&tsk->real_blocked);
tsk              3482 kernel/signal.c 		sig = dequeue_signal(tsk, &mask, info);
tsk              3484 kernel/signal.c 	spin_unlock_irq(&tsk->sighand->siglock);
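
Note: the do_sigtimedwait() excerpt above is reached from sigtimedwait(2); the signal must be blocked so it is queued rather than delivered, exactly as the kernel temporarily widens tsk->blocked around the sleep. A userspace counterpart:

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec to = { .tv_sec = 5 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);   /* must be blocked first */

	int sig = sigtimedwait(&set, &info, &to);
	if (sig > 0)
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	else
		perror("sigtimedwait");       /* EAGAIN on timeout */
	return 0;
}
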
tsk                32 kernel/smpboot.c 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
tsk                34 kernel/smpboot.c 	if (!tsk)
tsk                36 kernel/smpboot.c 	init_idle(tsk, cpu);
tsk                37 kernel/smpboot.c 	return tsk;
tsk                53 kernel/smpboot.c 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
tsk                55 kernel/smpboot.c 	if (!tsk) {
tsk                56 kernel/smpboot.c 		tsk = fork_idle(cpu);
tsk                57 kernel/smpboot.c 		if (IS_ERR(tsk))
tsk                60 kernel/smpboot.c 			per_cpu(idle_threads, cpu) = tsk;
tsk               173 kernel/smpboot.c 	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
tsk               176 kernel/smpboot.c 	if (tsk)
tsk               185 kernel/smpboot.c 	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
tsk               187 kernel/smpboot.c 	if (IS_ERR(tsk)) {
tsk               189 kernel/smpboot.c 		return PTR_ERR(tsk);
tsk               195 kernel/smpboot.c 	kthread_park(tsk);
tsk               196 kernel/smpboot.c 	get_task_struct(tsk);
tsk               197 kernel/smpboot.c 	*per_cpu_ptr(ht->store, cpu) = tsk;
tsk               205 kernel/smpboot.c 		if (!wait_task_inactive(tsk, TASK_PARKED))
tsk               230 kernel/smpboot.c 	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
tsk               233 kernel/smpboot.c 		kthread_unpark(tsk);
tsk               249 kernel/smpboot.c 	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
tsk               251 kernel/smpboot.c 	if (tsk && !ht->selfparking)
tsk               252 kernel/smpboot.c 		kthread_park(tsk);
tsk               272 kernel/smpboot.c 		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
tsk               274 kernel/smpboot.c 		if (tsk) {
tsk               275 kernel/smpboot.c 			kthread_stop(tsk);
tsk               276 kernel/smpboot.c 			put_task_struct(tsk);
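
Note: the kernel/smpboot.c hits above show the per-CPU kthread lifecycle: create on a CPU, park until the CPU is brought up, pin a reference, unpark/park across hotplug, and finally stop. A sketch using only the APIs visible in the listing (module context assumed; thread function and name are hypothetical):

#include <linux/kthread.h>
#include <linux/err.h>

static int my_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();
		/* ... per-cpu work, then sleep ... */
	}
	return 0;
}

static struct task_struct *bring_up(void *data, unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_cpu(my_thread_fn, data, cpu, "mythread/%u");
	if (IS_ERR(tsk))
		return tsk;
	kthread_park(tsk);      /* stay parked until the CPU is online */
	get_task_struct(tsk);   /* caller now owns a reference         */
	return tsk;             /* later: kthread_stop(); put_task_struct() */
}
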
tsk                73 kernel/softirq.c 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
tsk                75 kernel/softirq.c 	if (tsk && tsk->state != TASK_RUNNING)
tsk                76 kernel/softirq.c 		wake_up_process(tsk);
tsk                87 kernel/softirq.c 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
tsk                91 kernel/softirq.c 	return tsk && (tsk->state == TASK_RUNNING) &&
tsk                92 kernel/softirq.c 		!__kthread_should_park(tsk);
tsk               137 kernel/stacktrace.c unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
tsk               145 kernel/stacktrace.c 		.skip	= skipnr + !!(current == tsk),
tsk               148 kernel/stacktrace.c 	if (!try_get_task_stack(tsk))
tsk               151 kernel/stacktrace.c 	arch_stack_walk(consume_entry, &c, tsk, NULL);
tsk               152 kernel/stacktrace.c 	put_task_stack(tsk);
tsk               192 kernel/stacktrace.c int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
tsk               206 kernel/stacktrace.c 	if (!try_get_task_stack(tsk))
tsk               209 kernel/stacktrace.c 	ret = arch_stack_walk_reliable(consume_entry, &c, tsk);
tsk               210 kernel/stacktrace.c 	put_task_stack(tsk);
tsk               253 kernel/stacktrace.c save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
tsk               345 kernel/stacktrace.c int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
tsk               352 kernel/stacktrace.c 	int ret = save_stack_trace_tsk_reliable(tsk, &trace);
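
Note: stack_trace_save_tsk() above fills an array of return addresses for another task (taking a stack reference internally via try_get_task_stack()). A minimal sketch of dumping a task's stack with it, paired with stack_trace_print() (module context assumed):

#include <linux/stacktrace.h>
#include <linux/sched.h>
#include <linux/kernel.h>

static void dump_task_stack(struct task_struct *tsk)
{
	unsigned long entries[32];
	unsigned int nr;

	nr = stack_trace_save_tsk(tsk, entries, ARRAY_SIZE(entries), 0);
	stack_trace_print(entries, nr, 0);   /* printk one frame per line */
}
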
tsk              1527 kernel/sys.c   int do_prlimit(struct task_struct *tsk, unsigned int resource,
tsk              1545 kernel/sys.c   	if (!tsk->sighand) {
tsk              1550 kernel/sys.c   	rlim = tsk->signal->rlim + resource;
tsk              1551 kernel/sys.c   	task_lock(tsk->group_leader);
tsk              1559 kernel/sys.c   			retval = security_task_setrlimit(tsk, resource, new_rlim);
tsk              1567 kernel/sys.c   	task_unlock(tsk->group_leader);
tsk              1577 kernel/sys.c   		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
tsk              1612 kernel/sys.c   	struct task_struct *tsk;
tsk              1627 kernel/sys.c   	tsk = pid ? find_task_by_vpid(pid) : current;
tsk              1628 kernel/sys.c   	if (!tsk) {
tsk              1632 kernel/sys.c   	ret = check_prlimit_permission(tsk, checkflags);
tsk              1637 kernel/sys.c   	get_task_struct(tsk);
tsk              1640 kernel/sys.c   	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
tsk              1649 kernel/sys.c   	put_task_struct(tsk);
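
Note: do_prlimit() above backs prlimit64(2); glibc exposes it as prlimit(3), which can read and write another process's limit in one call. A userspace example (pid 0 means the caller; a foreign pid needs the permission checked by check_prlimit_permission() above):

#define _GNU_SOURCE
#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
	struct rlimit new = { .rlim_cur = 4096, .rlim_max = 4096 };
	struct rlimit old;

	if (prlimit(0, RLIMIT_NOFILE, &new, &old) == 0)
		printf("RLIMIT_NOFILE was %llu/%llu\n",
		       (unsigned long long)old.rlim_cur,
		       (unsigned long long)old.rlim_max);
	else
		perror("prlimit");
	return 0;
}
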
tsk               161 kernel/taskstats.c 		       struct task_struct *tsk, struct taskstats *stats)
tsk               171 kernel/taskstats.c 	delayacct_add_tsk(stats, tsk);
tsk               175 kernel/taskstats.c 	stats->nvcsw = tsk->nvcsw;
tsk               176 kernel/taskstats.c 	stats->nivcsw = tsk->nivcsw;
tsk               177 kernel/taskstats.c 	bacct_add_tsk(user_ns, pid_ns, stats, tsk);
tsk               180 kernel/taskstats.c 	xacct_add_tsk(stats, tsk);
tsk               185 kernel/taskstats.c 	struct task_struct *tsk;
tsk               187 kernel/taskstats.c 	tsk = find_get_task_by_vpid(pid);
tsk               188 kernel/taskstats.c 	if (!tsk)
tsk               190 kernel/taskstats.c 	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
tsk               191 kernel/taskstats.c 	put_task_struct(tsk);
tsk               197 kernel/taskstats.c 	struct task_struct *tsk, *first;
tsk               218 kernel/taskstats.c 	tsk = first;
tsk               221 kernel/taskstats.c 		if (tsk->exit_state)
tsk               229 kernel/taskstats.c 		delayacct_add_tsk(stats, tsk);
tsk               232 kernel/taskstats.c 		delta = start_time - tsk->start_time;
tsk               237 kernel/taskstats.c 		task_cputime(tsk, &utime, &stime);
tsk               241 kernel/taskstats.c 		stats->nvcsw += tsk->nvcsw;
tsk               242 kernel/taskstats.c 		stats->nivcsw += tsk->nivcsw;
tsk               243 kernel/taskstats.c 	} while_each_thread(first, tsk);
tsk               258 kernel/taskstats.c static void fill_tgid_exit(struct task_struct *tsk)
tsk               262 kernel/taskstats.c 	spin_lock_irqsave(&tsk->sighand->siglock, flags);
tsk               263 kernel/taskstats.c 	if (!tsk->signal->stats)
tsk               272 kernel/taskstats.c 	delayacct_add_tsk(tsk->signal->stats, tsk);
tsk               274 kernel/taskstats.c 	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
tsk               554 kernel/taskstats.c static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
tsk               556 kernel/taskstats.c 	struct signal_struct *sig = tsk->signal;
tsk               561 kernel/taskstats.c 	if (stats || thread_group_empty(tsk))
tsk               567 kernel/taskstats.c 	spin_lock_irq(&tsk->sighand->siglock);
tsk               578 kernel/taskstats.c 	spin_unlock_irq(&tsk->sighand->siglock);
tsk               587 kernel/taskstats.c void taskstats_exit(struct task_struct *tsk, int group_dead)
tsk               604 kernel/taskstats.c 	is_thread_group = !!taskstats_tgid_alloc(tsk);
tsk               609 kernel/taskstats.c 		fill_tgid_exit(tsk);
tsk               621 kernel/taskstats.c 			 task_pid_nr_ns(tsk, &init_pid_ns));
tsk               625 kernel/taskstats.c 	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);
tsk               634 kernel/taskstats.c 			 task_tgid_nr_ns(tsk, &init_pid_ns));
tsk               638 kernel/taskstats.c 	memcpy(stats, tsk->signal->stats, sizeof(*stats));
tsk                47 kernel/time/itimer.c static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
tsk                51 kernel/time/itimer.c 	struct cpu_itimer *it = &tsk->signal->it[clock_id];
tsk                53 kernel/time/itimer.c 	spin_lock_irq(&tsk->sighand->siglock);
tsk                60 kernel/time/itimer.c 		thread_group_sample_cputime(tsk, samples);
tsk                70 kernel/time/itimer.c 	spin_unlock_irq(&tsk->sighand->siglock);
tsk                78 kernel/time/itimer.c 	struct task_struct *tsk = current;
tsk                82 kernel/time/itimer.c 		spin_lock_irq(&tsk->sighand->siglock);
tsk                83 kernel/time/itimer.c 		value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
tsk                85 kernel/time/itimer.c 			ktime_to_timeval(tsk->signal->it_real_incr);
tsk                86 kernel/time/itimer.c 		spin_unlock_irq(&tsk->sighand->siglock);
tsk                89 kernel/time/itimer.c 		get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
tsk                92 kernel/time/itimer.c 		get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
tsk               143 kernel/time/itimer.c static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
tsk               148 kernel/time/itimer.c 	struct cpu_itimer *it = &tsk->signal->it[clock_id];
tsk               157 kernel/time/itimer.c 	spin_lock_irq(&tsk->sighand->siglock);
tsk               164 kernel/time/itimer.c 		set_process_cpu_timer(tsk, clock_id, &nval, &oval);
tsk               171 kernel/time/itimer.c 	spin_unlock_irq(&tsk->sighand->siglock);
tsk               187 kernel/time/itimer.c 	struct task_struct *tsk = current;
tsk               201 kernel/time/itimer.c 		spin_lock_irq(&tsk->sighand->siglock);
tsk               202 kernel/time/itimer.c 		timer = &tsk->signal->real_timer;
tsk               206 kernel/time/itimer.c 				= ktime_to_timeval(tsk->signal->it_real_incr);
tsk               210 kernel/time/itimer.c 			spin_unlock_irq(&tsk->sighand->siglock);
tsk               216 kernel/time/itimer.c 			tsk->signal->it_real_incr =
tsk               220 kernel/time/itimer.c 			tsk->signal->it_real_incr = 0;
tsk               223 kernel/time/itimer.c 		spin_unlock_irq(&tsk->sighand->siglock);
tsk               226 kernel/time/itimer.c 		set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
tsk               229 kernel/time/itimer.c 		set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
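
Note: do_setitimer()/do_getitimer() above are reached from setitimer(2)/getitimer(2); ITIMER_REAL arms the hrtimer in tsk->signal->real_timer, and it_interval feeds tsk->signal->it_real_incr for re-arming. A userspace example:

#include <sys/time.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t fired;

static void on_alarm(int sig) { (void)sig; fired = 1; }

int main(void)
{
	struct itimerval it = {
		.it_value    = { .tv_sec = 1 },   /* first expiry        */
		.it_interval = { .tv_sec = 1 },   /* it_real_incr analogue */
	};

	signal(SIGALRM, on_alarm);
	setitimer(ITIMER_REAL, &it, NULL);
	while (!fired)
		pause();
	puts("SIGALRM delivered");
	return 0;
}
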
tsk               277 kernel/time/posix-cpu-timers.c void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
tsk               279 kernel/time/posix-cpu-timers.c 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
tsk               280 kernel/time/posix-cpu-timers.c 	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
tsk               299 kernel/time/posix-cpu-timers.c static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
tsk               301 kernel/time/posix-cpu-timers.c 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
tsk               302 kernel/time/posix-cpu-timers.c 	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
tsk               313 kernel/time/posix-cpu-timers.c 		thread_group_cputime(tsk, &sum);
tsk               328 kernel/time/posix-cpu-timers.c static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
tsk               332 kernel/time/posix-cpu-timers.c 	thread_group_cputime(tsk, &ct);
tsk               365 kernel/time/posix-cpu-timers.c 	struct task_struct *tsk;
tsk               368 kernel/time/posix-cpu-timers.c 	tsk = get_task_for_clock_get(clock);
tsk               369 kernel/time/posix-cpu-timers.c 	if (!tsk)
tsk               373 kernel/time/posix-cpu-timers.c 		t = cpu_clock_sample(clkid, tsk);
tsk               375 kernel/time/posix-cpu-timers.c 		t = cpu_clock_sample_group(clkid, tsk, false);
tsk               376 kernel/time/posix-cpu-timers.c 	put_task_struct(tsk);
tsk               474 kernel/time/posix-cpu-timers.c void posix_cpu_timers_exit(struct task_struct *tsk)
tsk               476 kernel/time/posix-cpu-timers.c 	cleanup_timers(&tsk->posix_cputimers);
tsk               478 kernel/time/posix-cpu-timers.c void posix_cpu_timers_exit_group(struct task_struct *tsk)
tsk               480 kernel/time/posix-cpu-timers.c 	cleanup_timers(&tsk->signal->posix_cputimers);
tsk               799 kernel/time/posix-cpu-timers.c static inline void check_dl_overrun(struct task_struct *tsk)
tsk               801 kernel/time/posix-cpu-timers.c 	if (tsk->dl.dl_overrun) {
tsk               802 kernel/time/posix-cpu-timers.c 		tsk->dl.dl_overrun = 0;
tsk               803 kernel/time/posix-cpu-timers.c 		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
tsk               826 kernel/time/posix-cpu-timers.c static void check_thread_timers(struct task_struct *tsk,
tsk               829 kernel/time/posix-cpu-timers.c 	struct posix_cputimers *pct = &tsk->posix_cputimers;
tsk               833 kernel/time/posix-cpu-timers.c 	if (dl_task(tsk))
tsk               834 kernel/time/posix-cpu-timers.c 		check_dl_overrun(tsk);
tsk               839 kernel/time/posix-cpu-timers.c 	task_sample_cputime(tsk, samples);
tsk               845 kernel/time/posix-cpu-timers.c 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
tsk               848 kernel/time/posix-cpu-timers.c 		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
tsk               849 kernel/time/posix-cpu-timers.c 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
tsk               859 kernel/time/posix-cpu-timers.c 			tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
tsk               864 kernel/time/posix-cpu-timers.c 		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
tsk               876 kernel/time/posix-cpu-timers.c static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
tsk               890 kernel/time/posix-cpu-timers.c 				    task_tgid(tsk), cur_time);
tsk               891 kernel/time/posix-cpu-timers.c 		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
tsk               903 kernel/time/posix-cpu-timers.c static void check_process_timers(struct task_struct *tsk,
tsk               906 kernel/time/posix-cpu-timers.c 	struct signal_struct *const sig = tsk->signal;
tsk               935 kernel/time/posix-cpu-timers.c 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
tsk               938 kernel/time/posix-cpu-timers.c 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
tsk               942 kernel/time/posix-cpu-timers.c 	soft = task_rlimit(tsk, RLIMIT_CPU);
tsk               945 kernel/time/posix-cpu-timers.c 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
tsk              1062 kernel/time/posix-cpu-timers.c static inline bool fastpath_timer_check(struct task_struct *tsk)
tsk              1064 kernel/time/posix-cpu-timers.c 	struct posix_cputimers *pct = &tsk->posix_cputimers;
tsk              1070 kernel/time/posix-cpu-timers.c 		task_sample_cputime(tsk, samples);
tsk              1075 kernel/time/posix-cpu-timers.c 	sig = tsk->signal;
tsk              1102 kernel/time/posix-cpu-timers.c 	if (dl_task(tsk) && tsk->dl.dl_overrun)
tsk              1115 kernel/time/posix-cpu-timers.c 	struct task_struct *tsk = current;
tsk              1126 kernel/time/posix-cpu-timers.c 	if (!fastpath_timer_check(tsk))
tsk              1129 kernel/time/posix-cpu-timers.c 	if (!lock_task_sighand(tsk, &flags))
tsk              1136 kernel/time/posix-cpu-timers.c 	check_thread_timers(tsk, &firing);
tsk              1138 kernel/time/posix-cpu-timers.c 	check_process_timers(tsk, &firing);
tsk              1148 kernel/time/posix-cpu-timers.c 	unlock_task_sighand(tsk, &flags);
tsk              1178 kernel/time/posix-cpu-timers.c void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
tsk              1186 kernel/time/posix-cpu-timers.c 	nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
tsk              1187 kernel/time/posix-cpu-timers.c 	now = cpu_clock_sample_group(clkid, tsk, true);
tsk              1216 kernel/time/posix-cpu-timers.c 	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
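
Note: the posix-cpu-timers code above samples thread-group CPU time and fires when the group crosses a threshold; from userspace the same machinery is driven by a POSIX timer on CLOCK_PROCESS_CPUTIME_ID. A small example (link with -lrt; SIGPROF's default action terminates the process):

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	timer_t tid;
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGPROF,
	};
	struct itimerspec its = { .it_value = { .tv_sec = 2 } };

	if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid) != 0) {
		perror("timer_create");
		return 1;
	}
	timer_settime(tid, 0, &its, NULL);
	for (;;)            /* burn CPU until 2s of process time elapse */
		;
}
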
tsk               343 kernel/time/tick-sched.c void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
tsk               349 kernel/time/tick-sched.c 	tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
tsk               352 kernel/time/tick-sched.c void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
tsk               354 kernel/time/tick-sched.c 	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
tsk               116 kernel/trace/blktrace.c static void trace_note_tsk(struct task_struct *tsk)
tsk               121 kernel/trace/blktrace.c 	tsk->btrace_seq = blktrace_seq;
tsk               124 kernel/trace/blktrace.c 		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
tsk               125 kernel/trace/blktrace.c 			   sizeof(tsk->comm), NULL);
tsk               217 kernel/trace/blktrace.c 	struct task_struct *tsk = current;
tsk               244 kernel/trace/blktrace.c 	pid = tsk->pid;
tsk               263 kernel/trace/blktrace.c 	if (unlikely(tsk->btrace_seq != blktrace_seq))
tsk               264 kernel/trace/blktrace.c 		trace_note_tsk(tsk);
tsk              1508 kernel/trace/trace.c __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
tsk              1522 kernel/trace/trace.c 	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
tsk              1523 kernel/trace/trace.c 	max_data->pid = tsk->pid;
tsk              1528 kernel/trace/trace.c 	if (tsk == current)
tsk              1531 kernel/trace/trace.c 		max_data->uid = task_uid(tsk);
tsk              1533 kernel/trace/trace.c 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
tsk              1534 kernel/trace/trace.c 	max_data->policy = tsk->policy;
tsk              1535 kernel/trace/trace.c 	max_data->rt_priority = tsk->rt_priority;
tsk              1538 kernel/trace/trace.c 	tracing_record_cmdline(tsk);
tsk              1552 kernel/trace/trace.c update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
tsk              1580 kernel/trace/trace.c 	__update_max_tr(tr, tsk, cpu);
tsk              1595 kernel/trace/trace.c update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
tsk              1626 kernel/trace/trace.c 	__update_max_tr(tr, tsk, cpu);
tsk              2140 kernel/trace/trace.c static int trace_save_cmdline(struct task_struct *tsk)
tsk              2145 kernel/trace/trace.c 	if (!tsk->pid)
tsk              2148 kernel/trace/trace.c 	if (unlikely(tsk->pid > PID_MAX_DEFAULT))
tsk              2160 kernel/trace/trace.c 	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
tsk              2174 kernel/trace/trace.c 		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
tsk              2175 kernel/trace/trace.c 		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
tsk              2180 kernel/trace/trace.c 	set_cmdline(idx, tsk->comm);
tsk              2232 kernel/trace/trace.c static int trace_save_tgid(struct task_struct *tsk)
tsk              2235 kernel/trace/trace.c 	if (!tsk->pid)
tsk              2238 kernel/trace/trace.c 	if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
tsk              2241 kernel/trace/trace.c 	tgid_map[tsk->pid] = tsk->tgid;
tsk              2343 kernel/trace/trace.c 	struct task_struct *tsk = current;
tsk              2346 kernel/trace/trace.c 	entry->pid			= (tsk) ? tsk->pid : 0;
tsk               783 kernel/trace/trace.h void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
tsk               786 kernel/trace/trace.h 			  struct task_struct *tsk, int cpu);
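
Note: trace_save_cmdline() above caches pid->comm in a bounded table (pids above PID_MAX_DEFAULT are simply not cached) with recycled slots, while trace_save_tgid() uses a flat pid-indexed map. A toy sketch of the bounded cache idea; sizes and names are hypothetical, and pid_to_slot[] is assumed pre-filled with -1:

#include <stdio.h>

#define TOY_PID_MAX   32768
#define TOY_SLOTS     128
#define TOY_COMM_LEN  16

struct toy_cmdline_cache {
	int  pid_to_slot[TOY_PID_MAX];     /* -1 = not cached       */
	int  slot_to_pid[TOY_SLOTS];
	char comm[TOY_SLOTS][TOY_COMM_LEN];
	int  next;                         /* round-robin recycling */
};

static void toy_save_cmdline(struct toy_cmdline_cache *c,
			     int pid, const char *comm)
{
	int slot;

	if (pid <= 0 || pid >= TOY_PID_MAX)
		return;                    /* out of range: skip    */
	slot = c->pid_to_slot[pid];
	if (slot < 0) {
		slot = c->next++ % TOY_SLOTS;
		c->pid_to_slot[c->slot_to_pid[slot]] = -1;  /* evict */
		c->slot_to_pid[slot] = pid;
		c->pid_to_slot[pid] = slot;
	}
	snprintf(c->comm[slot], TOY_COMM_LEN, "%s", comm);
}
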
tsk                22 kernel/tsacct.c 		   struct taskstats *stats, struct task_struct *tsk)
tsk                31 kernel/tsacct.c 	delta = ktime_get_ns() - tsk->start_time;
tsk                38 kernel/tsacct.c 	if (thread_group_leader(tsk)) {
tsk                39 kernel/tsacct.c 		stats->ac_exitcode = tsk->exit_code;
tsk                40 kernel/tsacct.c 		if (tsk->flags & PF_FORKNOEXEC)
tsk                43 kernel/tsacct.c 	if (tsk->flags & PF_SUPERPRIV)
tsk                45 kernel/tsacct.c 	if (tsk->flags & PF_DUMPCORE)
tsk                47 kernel/tsacct.c 	if (tsk->flags & PF_SIGNALED)
tsk                49 kernel/tsacct.c 	stats->ac_nice	 = task_nice(tsk);
tsk                50 kernel/tsacct.c 	stats->ac_sched	 = tsk->policy;
tsk                51 kernel/tsacct.c 	stats->ac_pid	 = task_pid_nr_ns(tsk, pid_ns);
tsk                53 kernel/tsacct.c 	tcred = __task_cred(tsk);
tsk                56 kernel/tsacct.c 	stats->ac_ppid	 = pid_alive(tsk) ?
tsk                57 kernel/tsacct.c 		task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0;
tsk                60 kernel/tsacct.c 	task_cputime(tsk, &utime, &stime);
tsk                64 kernel/tsacct.c 	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
tsk                68 kernel/tsacct.c 	stats->ac_minflt = tsk->min_flt;
tsk                69 kernel/tsacct.c 	stats->ac_majflt = tsk->maj_flt;
tsk                71 kernel/tsacct.c 	strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm));
tsk               116 kernel/tsacct.c static void __acct_update_integrals(struct task_struct *tsk,
tsk               121 kernel/tsacct.c 	if (!likely(tsk->mm))
tsk               125 kernel/tsacct.c 	delta = time - tsk->acct_timexpd;
tsk               130 kernel/tsacct.c 	tsk->acct_timexpd = time;
tsk               136 kernel/tsacct.c 	tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm) >> 10;
tsk               137 kernel/tsacct.c 	tsk->acct_vm_mem1 += delta * tsk->mm->total_vm >> 10;
tsk               144 kernel/tsacct.c void acct_update_integrals(struct task_struct *tsk)
tsk               150 kernel/tsacct.c 	task_cputime(tsk, &utime, &stime);
tsk               151 kernel/tsacct.c 	__acct_update_integrals(tsk, utime, stime);
tsk               159 kernel/tsacct.c void acct_account_cputime(struct task_struct *tsk)
tsk               161 kernel/tsacct.c 	__acct_update_integrals(tsk, tsk->utime, tsk->stime);
tsk               168 kernel/tsacct.c void acct_clear_integrals(struct task_struct *tsk)
tsk               170 kernel/tsacct.c 	tsk->acct_timexpd = 0;
tsk               171 kernel/tsacct.c 	tsk->acct_rss_mem1 = 0;
tsk               172 kernel/tsacct.c 	tsk->acct_vm_mem1 = 0;
tsk               703 kernel/umh.c   void __exit_umh(struct task_struct *tsk)
tsk               706 kernel/umh.c   	pid_t pid = tsk->pid;
tsk              2631 mm/compaction.c 	struct task_struct *tsk = current;
tsk              2636 mm/compaction.c 		set_cpus_allowed_ptr(tsk, cpumask);
tsk               627 mm/gup.c       static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
tsk               658 mm/gup.c       	if (tsk) {
tsk               660 mm/gup.c       			tsk->maj_flt++;
tsk               662 mm/gup.c       			tsk->min_flt++;
tsk               788 mm/gup.c       static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
tsk               860 mm/gup.c       			ret = faultin_page(tsk, vma, start, &foll_flags,
tsk               963 mm/gup.c       int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
tsk              1003 mm/gup.c       	if (tsk) {
tsk              1005 mm/gup.c       			tsk->maj_flt++;
tsk              1007 mm/gup.c       			tsk->min_flt++;
tsk              1013 mm/gup.c       static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
tsk              1038 mm/gup.c       		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
tsk              1081 mm/gup.c       		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
tsk              1164 mm/gup.c       long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
tsk              1178 mm/gup.c       	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
tsk              1331 mm/gup.c       static long __get_user_pages_locked(struct task_struct *tsk,
tsk              1448 mm/gup.c       static long check_and_migrate_cma_pages(struct task_struct *tsk,
tsk              1522 mm/gup.c       		nr_pages = __get_user_pages_locked(tsk, mm, start, nr_pages,
tsk              1535 mm/gup.c       static long check_and_migrate_cma_pages(struct task_struct *tsk,
tsk              1551 mm/gup.c       static long __gup_longterm_locked(struct task_struct *tsk,
tsk              1577 mm/gup.c       	rc = __get_user_pages_locked(tsk, mm, start, nr_pages, pages,
tsk              1592 mm/gup.c       		rc = check_and_migrate_cma_pages(tsk, mm, start, rc, pages,
tsk              1602 mm/gup.c       static __always_inline long __gup_longterm_locked(struct task_struct *tsk,
tsk              1610 mm/gup.c       	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
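
Note: the mm/gup.c hits above are the machinery behind remote access; a sketch of the read side of __access_remote_vm() (which appears further down, in mm/memory.c) built from get_user_pages_remote(), using the ~v5.5 signature shown in the listing and mmap_sem held for read. Module context and a referenced mm are assumed:

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

static int peek_remote_byte(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long addr, unsigned char *out)
{
	struct page *page;
	void *kaddr;
	long got;

	down_read(&mm->mmap_sem);
	got = get_user_pages_remote(tsk, mm, addr & PAGE_MASK, 1,
				    FOLL_FORCE, &page, NULL, NULL);
	up_read(&mm->mmap_sem);
	if (got != 1)
		return -EFAULT;

	kaddr = kmap(page);                 /* map the pinned page */
	*out = *((unsigned char *)kaddr + (addr & ~PAGE_MASK));
	kunmap(page);
	put_page(page);                     /* drop the gup reference */
	return 0;
}
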
tsk               199 mm/memory-failure.c 	struct task_struct *tsk;
tsk               211 mm/memory-failure.c 	struct task_struct *t = tk->tsk;
tsk               308 mm/memory-failure.c static void add_to_kill(struct task_struct *tsk, struct page *p,
tsk               343 mm/memory-failure.c 			page_to_pfn(p), tsk->comm);
tsk               348 mm/memory-failure.c 	get_task_struct(tsk);
tsk               349 mm/memory-failure.c 	tk->tsk = tsk;
tsk               375 mm/memory-failure.c 				       pfn, tk->tsk->comm, tk->tsk->pid);
tsk               377 mm/memory-failure.c 						 tk->tsk, PIDTYPE_PID);
tsk               388 mm/memory-failure.c 				       pfn, tk->tsk->comm, tk->tsk->pid);
tsk               390 mm/memory-failure.c 		put_task_struct(tk->tsk);
tsk               403 mm/memory-failure.c static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
tsk               407 mm/memory-failure.c 	for_each_thread(tsk, t)
tsk               419 mm/memory-failure.c static struct task_struct *task_early_kill(struct task_struct *tsk,
tsk               423 mm/memory-failure.c 	if (!tsk->mm)
tsk               426 mm/memory-failure.c 		return tsk;
tsk               427 mm/memory-failure.c 	t = find_early_kill_thread(tsk);
tsk               431 mm/memory-failure.c 		return tsk;
tsk               442 mm/memory-failure.c 	struct task_struct *tsk;
tsk               452 mm/memory-failure.c 	for_each_process (tsk) {
tsk               454 mm/memory-failure.c 		struct task_struct *t = task_early_kill(tsk, force_early);
tsk               478 mm/memory-failure.c 	struct task_struct *tsk;
tsk               483 mm/memory-failure.c 	for_each_process(tsk) {
tsk               485 mm/memory-failure.c 		struct task_struct *t = task_early_kill(tsk, force_early);
tsk              4313 mm/memory.c    int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
tsk              4329 mm/memory.c    		ret = get_user_pages_remote(tsk, mm, addr, 1,
tsk              4399 mm/memory.c    int access_process_vm(struct task_struct *tsk, unsigned long addr,
tsk              4405 mm/memory.c    	mm = get_task_mm(tsk);
tsk              4409 mm/memory.c    	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
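
Note: access_process_vm() above is what ptrace-style accessors call in the kernel; the direct userspace analogue is process_vm_readv(2), which copies straight from another process's address space under the same ptrace permission model. A small example:

#define _GNU_SOURCE
#include <sys/uio.h>
#include <sys/types.h>

int peek_remote(pid_t pid, void *remote_addr, void *buf, size_t len)
{
	struct iovec local  = { .iov_base = buf,         .iov_len = len };
	struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

	ssize_t n = process_vm_readv(pid, &local, 1, &remote, 1, 0);
	return n == (ssize_t)len ? 0 : -1;
}
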
tsk               367 mm/mempolicy.c void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
tsk               369 mm/mempolicy.c 	mpol_rebind_policy(tsk->mempolicy, new);
tsk              2021 mm/mempolicy.c bool mempolicy_nodemask_intersects(struct task_struct *tsk,
tsk              2029 mm/mempolicy.c 	task_lock(tsk);
tsk              2030 mm/mempolicy.c 	mempolicy = tsk->mempolicy;
tsk              2051 mm/mempolicy.c 	task_unlock(tsk);
tsk                25 mm/mmu_context.c 	struct task_struct *tsk = current;
tsk                27 mm/mmu_context.c 	task_lock(tsk);
tsk                28 mm/mmu_context.c 	active_mm = tsk->active_mm;
tsk                31 mm/mmu_context.c 		tsk->active_mm = mm;
tsk                33 mm/mmu_context.c 	tsk->mm = mm;
tsk                34 mm/mmu_context.c 	switch_mm(active_mm, mm, tsk);
tsk                35 mm/mmu_context.c 	task_unlock(tsk);
tsk                55 mm/mmu_context.c 	struct task_struct *tsk = current;
tsk                57 mm/mmu_context.c 	task_lock(tsk);
tsk                59 mm/mmu_context.c 	tsk->mm = NULL;
tsk                61 mm/mmu_context.c 	enter_lazy_tlb(mm, tsk);
tsk                62 mm/mmu_context.c 	task_unlock(tsk);
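
Note: the mm/mmu_context.c excerpt above defines use_mm()/unuse_mm(), which let a kernel thread temporarily adopt a user mm so that user-pointer accesses resolve against it. A sketch of that usage from a kernel thread; a valid, referenced mm is assumed:

#include <linux/mmu_context.h>
#include <linux/uaccess.h>

static int peek_with_use_mm(struct mm_struct *mm,
			    const void __user *uaddr, void *buf, size_t len)
{
	int ret;

	use_mm(mm);                       /* become a user of this mm   */
	ret = copy_from_user(buf, uaddr, len) ? -EFAULT : 0;
	unuse_mm(mm);                     /* back to lazy-tlb kernel mm */
	return ret;
}
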
tsk              1707 mm/nommu.c     int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
tsk              1761 mm/nommu.c     int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
tsk              1769 mm/nommu.c     	mm = get_task_mm(tsk);
tsk              1773 mm/nommu.c     	len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
tsk                88 mm/oom_kill.c  	struct task_struct *tsk;
tsk                96 mm/oom_kill.c  	for_each_thread(start, tsk) {
tsk               104 mm/oom_kill.c  			ret = mempolicy_nodemask_intersects(tsk, mask);
tsk               110 mm/oom_kill.c  			ret = cpuset_mems_allowed_intersects(current, tsk);
tsk               120 mm/oom_kill.c  static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
tsk               567 mm/oom_kill.c  static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
tsk               572 mm/oom_kill.c  		trace_skip_task_reaping(tsk->pid);
tsk               583 mm/oom_kill.c  		trace_skip_task_reaping(tsk->pid);
tsk               587 mm/oom_kill.c  	trace_start_task_reaping(tsk->pid);
tsk               595 mm/oom_kill.c  			task_pid_nr(tsk), tsk->comm,
tsk               600 mm/oom_kill.c  	trace_finish_task_reaping(tsk->pid);
tsk               608 mm/oom_kill.c  static void oom_reap_task(struct task_struct *tsk)
tsk               611 mm/oom_kill.c  	struct mm_struct *mm = tsk->signal->oom_mm;
tsk               614 mm/oom_kill.c  	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
tsk               622 mm/oom_kill.c  		task_pid_nr(tsk), tsk->comm);
tsk               626 mm/oom_kill.c  	tsk->oom_reaper_list = NULL;
tsk               635 mm/oom_kill.c  	put_task_struct(tsk);
tsk               641 mm/oom_kill.c  		struct task_struct *tsk = NULL;
tsk               646 mm/oom_kill.c  			tsk = oom_reaper_list;
tsk               647 mm/oom_kill.c  			oom_reaper_list = tsk->oom_reaper_list;
tsk               651 mm/oom_kill.c  		if (tsk)
tsk               652 mm/oom_kill.c  			oom_reap_task(tsk);
tsk               658 mm/oom_kill.c  static void wake_oom_reaper(struct task_struct *tsk)
tsk               661 mm/oom_kill.c  	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
tsk               664 mm/oom_kill.c  	get_task_struct(tsk);
tsk               667 mm/oom_kill.c  	tsk->oom_reaper_list = oom_reaper_list;
tsk               668 mm/oom_kill.c  	oom_reaper_list = tsk;
tsk               670 mm/oom_kill.c  	trace_wake_reaper(tsk->pid);
tsk               681 mm/oom_kill.c  static inline void wake_oom_reaper(struct task_struct *tsk)
tsk               696 mm/oom_kill.c  static void mark_oom_victim(struct task_struct *tsk)
tsk               698 mm/oom_kill.c  	struct mm_struct *mm = tsk->mm;
tsk               702 mm/oom_kill.c  	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
tsk               706 mm/oom_kill.c  	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
tsk               707 mm/oom_kill.c  		mmgrab(tsk->signal->oom_mm);
tsk               717 mm/oom_kill.c  	__thaw_task(tsk);
tsk               719 mm/oom_kill.c  	trace_mark_victim(tsk->pid);
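
Note: wake_oom_reaper() above pushes victims onto an intrusive singly linked list under a spinlock and wakes a dedicated kthread, which pops them in oom_reaper(). A sketch of that producer/consumer discipline with hypothetical names:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct toy_victim { struct toy_victim *next; };

static struct toy_victim *toy_list;
static DEFINE_SPINLOCK(toy_lock);
static DECLARE_WAIT_QUEUE_HEAD(toy_wait);

static void toy_queue_victim(struct toy_victim *v)
{
	spin_lock(&toy_lock);
	v->next = toy_list;        /* push on the head, like oom_reaper_list */
	toy_list = v;
	spin_unlock(&toy_lock);
	wake_up(&toy_wait);        /* kick the reaper thread */
}

static struct toy_victim *toy_pop_victim(void)
{
	struct toy_victim *v;

	wait_event(toy_wait, READ_ONCE(toy_list) != NULL);
	spin_lock(&toy_lock);
	v = toy_list;
	if (v)
		toy_list = v->next;
	spin_unlock(&toy_lock);
	return v;
}
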
tsk               404 mm/page-writeback.c 	struct task_struct *tsk;
tsk               438 mm/page-writeback.c 	tsk = current;
tsk               439 mm/page-writeback.c 	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
tsk               480 mm/page-writeback.c 	struct task_struct *tsk = current;
tsk               489 mm/page-writeback.c 	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
tsk              4222 mm/page_alloc.c static bool oom_reserves_allowed(struct task_struct *tsk)
tsk              4224 mm/page_alloc.c 	if (!tsk_is_oom_victim(tsk))
tsk              3899 mm/vmscan.c    	struct task_struct *tsk = current;
tsk              3903 mm/vmscan.c    		set_cpus_allowed_ptr(tsk, cpumask);
tsk              3917 mm/vmscan.c    	tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
tsk              3964 mm/vmscan.c    	tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
tsk                18 net/bpfilter/bpfilter_kern.c 	struct task_struct *tsk;
tsk                23 net/bpfilter/bpfilter_kern.c 	tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID);
tsk                24 net/bpfilter/bpfilter_kern.c 	if (tsk) {
tsk                25 net/bpfilter/bpfilter_kern.c 		send_sig(SIGKILL, tsk, 1);
tsk                26 net/bpfilter/bpfilter_kern.c 		put_task_struct(tsk);
tsk               676 net/core/net_namespace.c 	struct task_struct *tsk;
tsk               682 net/core/net_namespace.c 	tsk = find_task_by_vpid(pid);
tsk               683 net/core/net_namespace.c 	if (tsk) {
tsk               685 net/core/net_namespace.c 		task_lock(tsk);
tsk               686 net/core/net_namespace.c 		nsproxy = tsk->nsproxy;
tsk               689 net/core/net_namespace.c 		task_unlock(tsk);
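
Note: the bpfilter and net_namespace hits above share one idiom: resolve a pid to a task under rcu_read_lock() and take a reference before the RCU section ends (the kernel also packages this as find_get_task_by_vpid(), seen in the taskstats lines earlier). A sketch:

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/rcupdate.h>

static struct task_struct *grab_task_by_vpid(pid_t nr)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(nr);   /* valid only inside this section */
	if (tsk)
		get_task_struct(tsk);  /* now safe to use after unlock   */
	rcu_read_unlock();
	return tsk;                    /* caller does put_task_struct()  */
}
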
tsk               442 net/core/pktgen.c 	struct task_struct *tsk;
tsk              3273 net/core/pktgen.c 	remove_proc_entry(t->tsk->comm, t->net->proc_dir);
tsk              3522 net/core/pktgen.c 	pr_debug("%s stopping all device\n", t->tsk->comm);
tsk              3525 net/core/pktgen.c 	pr_debug("%s removing all device\n", t->tsk->comm);
tsk              3528 net/core/pktgen.c 	pr_debug("%s removing thread\n", t->tsk->comm);
tsk              3709 net/core/pktgen.c 	t->tsk = p;
tsk              3711 net/core/pktgen.c 	pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
tsk              3715 net/core/pktgen.c 		       PG_PROC_DIR, t->tsk->comm);
tsk              3845 net/core/pktgen.c 		kthread_stop(t->tsk);
tsk              3846 net/core/pktgen.c 		put_task_struct(t->tsk);
tsk                59 net/core/stream.c 	struct task_struct *tsk = current;
tsk                70 net/core/stream.c 		if (signal_pending(tsk))
tsk               223 net/phonet/socket.c 	struct task_struct *tsk = current;
tsk               269 net/phonet/socket.c 		if (signal_pending(tsk)) {
tsk                51 net/tipc/diag.c 				struct tipc_sock *tsk)
tsk                62 net/tipc/diag.c 	err = tipc_sk_fill_sock_diag(skb, cb, tsk, req->tidiag_states,
tsk               130 net/tipc/socket.c static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
tsk               132 net/tipc/socket.c static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
tsk               134 net/tipc/socket.c static int tipc_sk_leave(struct tipc_sock *tsk);
tsk               136 net/tipc/socket.c static int tipc_sk_insert(struct tipc_sock *tsk);
tsk               137 net/tipc/socket.c static void tipc_sk_remove(struct tipc_sock *tsk);
tsk               147 net/tipc/socket.c static u32 tsk_own_node(struct tipc_sock *tsk)
tsk               149 net/tipc/socket.c 	return msg_prevnode(&tsk->phdr);
tsk               152 net/tipc/socket.c static u32 tsk_peer_node(struct tipc_sock *tsk)
tsk               154 net/tipc/socket.c 	return msg_destnode(&tsk->phdr);
tsk               157 net/tipc/socket.c static u32 tsk_peer_port(struct tipc_sock *tsk)
tsk               159 net/tipc/socket.c 	return msg_destport(&tsk->phdr);
tsk               162 net/tipc/socket.c static  bool tsk_unreliable(struct tipc_sock *tsk)
tsk               164 net/tipc/socket.c 	return msg_src_droppable(&tsk->phdr) != 0;
tsk               167 net/tipc/socket.c static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
tsk               169 net/tipc/socket.c 	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
tsk               172 net/tipc/socket.c static bool tsk_unreturnable(struct tipc_sock *tsk)
tsk               174 net/tipc/socket.c 	return msg_dest_droppable(&tsk->phdr) != 0;
tsk               177 net/tipc/socket.c static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
tsk               179 net/tipc/socket.c 	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
tsk               182 net/tipc/socket.c static int tsk_importance(struct tipc_sock *tsk)
tsk               184 net/tipc/socket.c 	return msg_importance(&tsk->phdr);
tsk               187 net/tipc/socket.c static int tsk_set_importance(struct tipc_sock *tsk, int imp)
tsk               191 net/tipc/socket.c 	msg_set_importance(&tsk->phdr, (u32)imp);
tsk               200 net/tipc/socket.c static bool tsk_conn_cong(struct tipc_sock *tsk)
tsk               202 net/tipc/socket.c 	return tsk->snt_unacked > tsk->snd_win;
tsk               223 net/tipc/socket.c static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
tsk               225 net/tipc/socket.c 	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
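
Note: in net/tipc/socket.c, tsk is a struct tipc_sock, not a task_struct, and the cluster of small helpers above (tsk_peer_node, tsk_set_unreliable, tsk_importance, ...) is one idiom: the socket keeps a template message header (tsk->phdr) and tiny getters/setters translate socket state to header fields. A toy sketch of that accessor pattern (types hypothetical):

#include <errno.h>

struct toy_hdr  { unsigned int importance; unsigned int droppable; };
struct toy_sock { struct toy_hdr phdr; };   /* template header */

static unsigned int toy_importance(struct toy_sock *s)
{
	return s->phdr.importance;
}

static int toy_set_importance(struct toy_sock *s, unsigned int imp)
{
	if (imp > 3)            /* TIPC_CRITICAL_IMPORTANCE analogue */
		return -EINVAL;
	s->phdr.importance = imp;
	return 0;
}
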
tsk               291 net/tipc/socket.c static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
tsk               293 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
tsk               295 net/tipc/socket.c 	u32 peer_port = tsk_peer_port(tsk);
tsk               305 net/tipc/socket.c 	peer_node = tsk_peer_node(tsk);
tsk               420 net/tipc/socket.c 	struct tipc_sock *tsk;
tsk               447 net/tipc/socket.c 	tsk = tipc_sk(sk);
tsk               448 net/tipc/socket.c 	tsk->max_pkt = MAX_PKT_DEFAULT;
tsk               449 net/tipc/socket.c 	INIT_LIST_HEAD(&tsk->publications);
tsk               450 net/tipc/socket.c 	INIT_LIST_HEAD(&tsk->cong_links);
tsk               451 net/tipc/socket.c 	msg = &tsk->phdr;
tsk               457 net/tipc/socket.c 	if (tipc_sk_insert(tsk)) {
tsk               468 net/tipc/socket.c 	msg_set_origport(msg, tsk->portid);
tsk               476 net/tipc/socket.c 	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
tsk               477 net/tipc/socket.c 	tsk->group_is_open = true;
tsk               478 net/tipc/socket.c 	atomic_set(&tsk->dupl_rcvcnt, 0);
tsk               481 net/tipc/socket.c 	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
tsk               482 net/tipc/socket.c 	tsk->rcv_win = tsk->snd_win;
tsk               485 net/tipc/socket.c 		tsk_set_unreturnable(tsk, true);
tsk               487 net/tipc/socket.c 			tsk_set_unreliable(tsk, true);
tsk               489 net/tipc/socket.c 	__skb_queue_head_init(&tsk->mc_method.deferredq);
tsk               496 net/tipc/socket.c 	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
tsk               498 net/tipc/socket.c 	sock_put(&tsk->sk);
tsk               505 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk               508 net/tipc/socket.c 	u32 dnode = tsk_peer_node(tsk);
tsk               512 net/tipc/socket.c 	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
tsk               513 net/tipc/socket.c 					    !tsk_conn_cong(tsk)));
tsk               529 net/tipc/socket.c 			tipc_node_remove_conn(net, dnode, tsk->portid);
tsk               540 net/tipc/socket.c 				      tsk_own_node(tsk), tsk_peer_port(tsk),
tsk               541 net/tipc/socket.c 				      tsk->portid, error);
tsk               543 net/tipc/socket.c 			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
tsk               544 net/tipc/socket.c 		tipc_node_remove_conn(net, dnode, tsk->portid);
tsk               568 net/tipc/socket.c 	struct tipc_sock *tsk;
tsk               577 net/tipc/socket.c 	tsk = tipc_sk(sk);
tsk               583 net/tipc/socket.c 	tipc_sk_leave(tsk);
tsk               584 net/tipc/socket.c 	tipc_sk_withdraw(tsk, 0, NULL);
tsk               585 net/tipc/socket.c 	__skb_queue_purge(&tsk->mc_method.deferredq);
tsk               587 net/tipc/socket.c 	tipc_sk_remove(tsk);
tsk               592 net/tipc/socket.c 	tipc_dest_list_purge(&tsk->cong_links);
tsk               593 net/tipc/socket.c 	tsk->cong_link_cnt = 0;
tsk               594 net/tipc/socket.c 	call_rcu(&tsk->rcu, tipc_sk_callback);
tsk               620 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk               625 net/tipc/socket.c 		res = tipc_sk_withdraw(tsk, 0, NULL);
tsk               628 net/tipc/socket.c 	if (tsk->group) {
tsk               656 net/tipc/socket.c 		tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
tsk               657 net/tipc/socket.c 		tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
tsk               681 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk               688 net/tipc/socket.c 		addr->addr.id.ref = tsk_peer_port(tsk);
tsk               689 net/tipc/socket.c 		addr->addr.id.node = tsk_peer_node(tsk);
tsk               691 net/tipc/socket.c 		addr->addr.id.ref = tsk->portid;
tsk               725 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk               738 net/tipc/socket.c 		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
tsk               747 net/tipc/socket.c 		if (tsk->group_is_open && !tsk->cong_link_cnt)
tsk               777 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk               778 net/tipc/socket.c 	struct tipc_msg *hdr = &tsk->phdr;
tsk               781 net/tipc/socket.c 	struct tipc_mc_method *method = &tsk->mc_method;
tsk               786 net/tipc/socket.c 	if (tsk->group)
tsk               790 net/tipc/socket.c 	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
tsk               820 net/tipc/socket.c 				     &tsk->cong_link_cnt);
tsk               837 net/tipc/socket.c static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
tsk               841 net/tipc/socket.c 	u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
tsk               842 net/tipc/socket.c 	struct tipc_mc_method *method = &tsk->mc_method;
tsk               844 net/tipc/socket.c 	struct tipc_msg *hdr = &tsk->phdr;
tsk               857 net/tipc/socket.c 	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
tsk               863 net/tipc/socket.c 	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
tsk               865 net/tipc/socket.c 		tipc_dest_push(&tsk->cong_links, dnode, 0);
tsk               866 net/tipc/socket.c 		tsk->cong_link_cnt++;
tsk               894 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk               907 net/tipc/socket.c 				!tipc_dest_find(&tsk->cong_links, node, 0) &&
tsk               908 net/tipc/socket.c 				tsk->group &&
tsk               909 net/tipc/socket.c 				!tipc_group_cong(tsk->group, node, port, blks,
tsk               917 net/tipc/socket.c 	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
tsk               937 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk               938 net/tipc/socket.c 	struct list_head *cong_links = &tsk->cong_links;
tsk               940 net/tipc/socket.c 	struct tipc_msg *hdr = &tsk->phdr;
tsk               958 net/tipc/socket.c 		exclude = tipc_group_exclude(tsk->group);
tsk               968 net/tipc/socket.c 			cong = tipc_group_cong(tsk->group, node, port, blks,
tsk               988 net/tipc/socket.c 					tsk->group &&
tsk               989 net/tipc/socket.c 					!tipc_group_cong(tsk->group, node, port,
tsk              1002 net/tipc/socket.c 	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
tsk              1023 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              1025 net/tipc/socket.c 	struct tipc_mc_method *method = &tsk->mc_method;
tsk              1028 net/tipc/socket.c 	struct tipc_msg *hdr = &tsk->phdr;
tsk              1035 net/tipc/socket.c 				!tsk->cong_link_cnt && tsk->group &&
tsk              1036 net/tipc/socket.c 				!tipc_group_bc_cong(tsk->group, blks));
tsk              1040 net/tipc/socket.c 	dsts = tipc_group_dests(tsk->group);
tsk              1055 net/tipc/socket.c 	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
tsk              1067 net/tipc/socket.c 	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
tsk              1072 net/tipc/socket.c 	tipc_group_update_bc_members(tsk->group, blks, ack);
tsk              1096 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              1097 net/tipc/socket.c 	struct tipc_group *grp = tsk->group;
tsk              1098 net/tipc/socket.c 	struct tipc_msg *hdr = &tsk->phdr;
tsk              1216 net/tipc/socket.c static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
tsk              1221 net/tipc/socket.c 	u32 onode = tsk_own_node(tsk);
tsk              1222 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
tsk              1227 net/tipc/socket.c 	if (!tsk_peer_msg(tsk, hdr)) {
tsk              1234 net/tipc/socket.c 		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
tsk              1235 net/tipc/socket.c 				      tsk_peer_port(tsk));
tsk              1249 net/tipc/socket.c 	tsk->probe_unacked = false;
tsk              1257 net/tipc/socket.c 		conn_cong = tsk_conn_cong(tsk);
tsk              1258 net/tipc/socket.c 		tsk->snt_unacked -= msg_conn_ack(hdr);
tsk              1259 net/tipc/socket.c 		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
tsk              1260 net/tipc/socket.c 			tsk->snd_win = msg_adv_win(hdr);
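
The tipc_sk_conn_proto_rcv() entries above cover connection-level protocol input: a probe reply clears probe_unacked, and a connection ACK retires sent-but-unacknowledged blocks, adopting the peer's advertised window when it has the TIPC_BLOCK_FLOWCTL capability. A hedged condensation of the ACK branch, using only the names visible in the listing (conn_ack_sketch itself is illustrative):

	static void conn_ack_sketch(struct tipc_sock *tsk, struct tipc_msg *hdr)
	{
		struct sock *sk = &tsk->sk;
		bool conn_cong = tsk_conn_cong(tsk);	/* congested before this ack? */

		tsk->snt_unacked -= msg_conn_ack(hdr);	/* peer consumed these blocks */
		if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
			tsk->snd_win = msg_adv_win(hdr);	/* adopt advertised window */
		if (conn_cong)
			sk->sk_write_space(sk);		/* wake senders blocked on congestion */
	}
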
tsk              1300 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              1303 net/tipc/socket.c 	struct list_head *clinks = &tsk->cong_links;
tsk              1305 net/tipc/socket.c 	struct tipc_group *grp = tsk->group;
tsk              1306 net/tipc/socket.c 	struct tipc_msg *hdr = &tsk->phdr;
tsk              1336 net/tipc/socket.c 		dest = &tsk->peer;
tsk              1346 net/tipc/socket.c 		if (tsk->published)
tsk              1349 net/tipc/socket.c 			tsk->conn_type = dest->addr.name.name.type;
tsk              1350 net/tipc/socket.c 			tsk->conn_instance = dest->addr.name.name.instance;
tsk              1395 net/tipc/socket.c 	mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
tsk              1405 net/tipc/socket.c 	rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
tsk              1408 net/tipc/socket.c 		tsk->cong_link_cnt++;
tsk              1446 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              1447 net/tipc/socket.c 	struct tipc_msg *hdr = &tsk->phdr;
tsk              1450 net/tipc/socket.c 	u32 dnode = tsk_peer_node(tsk);
tsk              1463 net/tipc/socket.c 			tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
tsk              1464 net/tipc/socket.c 			tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
tsk              1471 net/tipc/socket.c 					(!tsk->cong_link_cnt &&
tsk              1472 net/tipc/socket.c 					 !tsk_conn_cong(tsk) &&
tsk              1478 net/tipc/socket.c 		rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
tsk              1484 net/tipc/socket.c 		rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
tsk              1486 net/tipc/socket.c 			tsk->cong_link_cnt = 1;
tsk              1490 net/tipc/socket.c 			tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
tsk              1518 net/tipc/socket.c static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
tsk              1521 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
tsk              1523 net/tipc/socket.c 	struct tipc_msg *msg = &tsk->phdr;
tsk              1534 net/tipc/socket.c 	tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
tsk              1535 net/tipc/socket.c 	tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
tsk              1536 net/tipc/socket.c 	tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
tsk              1538 net/tipc/socket.c 	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
tsk              1542 net/tipc/socket.c 	tsk->rcv_win = FLOWCTL_MSG_WIN;
tsk              1543 net/tipc/socket.c 	tsk->snd_win = FLOWCTL_MSG_WIN;
tsk              1593 net/tipc/socket.c 				 struct tipc_sock *tsk)
tsk              1641 net/tipc/socket.c 		has_name = (tsk->conn_type != 0);
tsk              1642 net/tipc/socket.c 		anc_data[0] = tsk->conn_type;
tsk              1643 net/tipc/socket.c 		anc_data[1] = tsk->conn_instance;
tsk              1644 net/tipc/socket.c 		anc_data[2] = tsk->conn_instance;
tsk              1658 net/tipc/socket.c static void tipc_sk_send_ack(struct tipc_sock *tsk)
tsk              1660 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
tsk              1664 net/tipc/socket.c 	u32 peer_port = tsk_peer_port(tsk);
tsk              1665 net/tipc/socket.c 	u32 dnode = tsk_peer_node(tsk);
tsk              1670 net/tipc/socket.c 			      dnode, tsk_own_node(tsk), peer_port,
tsk              1671 net/tipc/socket.c 			      tsk->portid, TIPC_OK);
tsk              1675 net/tipc/socket.c 	msg_set_conn_ack(msg, tsk->rcv_unacked);
tsk              1676 net/tipc/socket.c 	tsk->rcv_unacked = 0;
tsk              1679 net/tipc/socket.c 	if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
tsk              1680 net/tipc/socket.c 		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
tsk              1681 net/tipc/socket.c 		msg_set_adv_win(msg, tsk->rcv_win);
tsk              1743 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              1780 net/tipc/socket.c 	rc = tipc_sk_anc_data_recv(m, skb, tsk);
tsk              1813 net/tipc/socket.c 	if (tsk->group && msg_in_group(hdr) && !grp_evt) {
tsk              1815 net/tipc/socket.c 		tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
tsk              1827 net/tipc/socket.c 	tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
tsk              1828 net/tipc/socket.c 	if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
tsk              1829 net/tipc/socket.c 		tipc_sk_send_ack(tsk);
tsk              1850 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              1893 net/tipc/socket.c 			rc = tipc_sk_anc_data_recv(m, skb, tsk);
tsk              1927 net/tipc/socket.c 		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
tsk              1928 net/tipc/socket.c 		if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
tsk              1929 net/tipc/socket.c 			tipc_sk_send_ack(tsk);
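
Both receive paths above (datagram and stream) end with the same ack-pacing idiom: received bytes are converted into flow-control blocks via tsk_inc(), accumulated in rcv_unacked, and a protocol ACK is emitted once a 1/TIPC_ACK_RATE fraction of the receive window is outstanding (tipc_sk_send_ack() then zeroes rcv_unacked, per the earlier entries). A sketch restating that pattern; the wrapper name is illustrative:

	static void rcv_ack_pacing_sketch(struct tipc_sock *tsk, u32 hlen, u32 dlen)
	{
		tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
		if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
			tipc_sk_send_ack(tsk);	/* resets rcv_unacked to 0 */
	}
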
tsk              1984 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              1986 net/tipc/socket.c 	struct tipc_group *grp = tsk->group;
tsk              1991 net/tipc/socket.c 		tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
tsk              1994 net/tipc/socket.c 		tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
tsk              1997 net/tipc/socket.c 		tsk->cong_link_cnt--;
tsk              2004 net/tipc/socket.c 		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
tsk              2023 net/tipc/socket.c static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
tsk              2025 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
tsk              2029 net/tipc/socket.c 	u32 pport = tsk_peer_port(tsk);
tsk              2030 net/tipc/socket.c 	u32 pnode = tsk_peer_node(tsk);
tsk              2045 net/tipc/socket.c 			tipc_sk_finish_conn(tsk, oport, onode);
tsk              2046 net/tipc/socket.c 			msg_set_importance(&tsk->phdr, msg_importance(hdr));
tsk              2067 net/tipc/socket.c 		delay %= (tsk->conn_timeout / 4);
tsk              2086 net/tipc/socket.c 		if (!tsk_peer_msg(tsk, hdr))
tsk              2091 net/tipc/socket.c 		tipc_node_remove_conn(net, pnode, tsk->portid);
tsk              2124 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              2133 net/tipc/socket.c 	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
tsk              2154 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              2155 net/tipc/socket.c 	struct tipc_group *grp = tsk->group;
tsk              2174 net/tipc/socket.c 		tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
tsk              2180 net/tipc/socket.c 		if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
tsk              2299 net/tipc/socket.c 	struct tipc_sock *tsk;
tsk              2306 net/tipc/socket.c 		tsk = tipc_sk_lookup(net, dport);
tsk              2308 net/tipc/socket.c 		if (likely(tsk)) {
tsk              2309 net/tipc/socket.c 			sk = &tsk->sk;
tsk              2386 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              2389 net/tipc/socket.c 	long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
tsk              2398 net/tipc/socket.c 	if (tsk->group) {
tsk              2404 net/tipc/socket.c 		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
tsk              2415 net/tipc/socket.c 		memcpy(&tsk->peer, dest, destlen);
tsk              2639 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              2640 net/tipc/socket.c 	u32 pnode = tsk_peer_node(tsk);
tsk              2641 net/tipc/socket.c 	u32 pport = tsk_peer_port(tsk);
tsk              2642 net/tipc/socket.c 	u32 self = tsk_own_node(tsk);
tsk              2643 net/tipc/socket.c 	u32 oport = tsk->portid;
tsk              2646 net/tipc/socket.c 	if (tsk->probe_unacked) {
tsk              2658 net/tipc/socket.c 	tsk->probe_unacked = true;
tsk              2664 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              2667 net/tipc/socket.c 	if (tsk->cong_link_cnt) {
tsk              2678 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              2679 net/tipc/socket.c 	u32 pnode = tsk_peer_node(tsk);
tsk              2702 net/tipc/socket.c 		rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
tsk              2706 net/tipc/socket.c 		tipc_dest_push(&tsk->cong_links, pnode, 0);
tsk              2707 net/tipc/socket.c 		tsk->cong_link_cnt = 1;
tsk              2712 net/tipc/socket.c static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
tsk              2715 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
tsk              2725 net/tipc/socket.c 	key = tsk->portid + tsk->pub_count + 1;
tsk              2726 net/tipc/socket.c 	if (key == tsk->portid)
tsk              2730 net/tipc/socket.c 				    scope, tsk->portid, key);
tsk              2734 net/tipc/socket.c 	list_add(&publ->binding_sock, &tsk->publications);
tsk              2735 net/tipc/socket.c 	tsk->pub_count++;
tsk              2736 net/tipc/socket.c 	tsk->published = 1;
tsk              2740 net/tipc/socket.c static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
tsk              2743 net/tipc/socket.c 	struct net *net = sock_net(&tsk->sk);
tsk              2751 net/tipc/socket.c 	list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
tsk              2770 net/tipc/socket.c 	if (list_empty(&tsk->publications))
tsk              2771 net/tipc/socket.c 		tsk->published = 0;
tsk              2782 net/tipc/socket.c 	struct tipc_sock *tsk;
tsk              2790 net/tipc/socket.c 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
tsk              2791 net/tipc/socket.c 			sock_hold(&tsk->sk);
tsk              2793 net/tipc/socket.c 			lock_sock(&tsk->sk);
tsk              2794 net/tipc/socket.c 			msg = &tsk->phdr;
tsk              2797 net/tipc/socket.c 			release_sock(&tsk->sk);
tsk              2799 net/tipc/socket.c 			sock_put(&tsk->sk);
tsk              2803 net/tipc/socket.c 	} while (tsk == ERR_PTR(-EAGAIN));
tsk              2811 net/tipc/socket.c 	struct tipc_sock *tsk;
tsk              2814 net/tipc/socket.c 	tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
tsk              2815 net/tipc/socket.c 	if (tsk)
tsk              2816 net/tipc/socket.c 		sock_hold(&tsk->sk);
tsk              2819 net/tipc/socket.c 	return tsk;
tsk              2822 net/tipc/socket.c static int tipc_sk_insert(struct tipc_sock *tsk)
tsk              2824 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
tsk              2834 net/tipc/socket.c 		tsk->portid = portid;
tsk              2835 net/tipc/socket.c 		sock_hold(&tsk->sk);
tsk              2836 net/tipc/socket.c 		if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
tsk              2839 net/tipc/socket.c 		sock_put(&tsk->sk);
tsk              2845 net/tipc/socket.c static void tipc_sk_remove(struct tipc_sock *tsk)
tsk              2847 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
tsk              2850 net/tipc/socket.c 	if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
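
The tipc_sk_lookup()/tipc_sk_insert()/tipc_sk_remove() entries above are the portid-to-socket rhashtable: lookups take a socket reference with sock_hold() that the caller must release with sock_put(), as the dump and diag walkers later in the listing do. A sketch of the lookup side, assuming the RCU protection such lookups normally carry (the wrapper name is illustrative):

	static struct tipc_sock *lookup_and_hold_sketch(struct tipc_net *tn, u32 portid)
	{
		struct tipc_sock *tsk;

		rcu_read_lock();
		tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
		if (tsk)
			sock_hold(&tsk->sk);	/* caller must sock_put() when done */
		rcu_read_unlock();
		return tsk;
	}
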
tsk              2883 net/tipc/socket.c static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
tsk              2885 net/tipc/socket.c 	struct net *net = sock_net(&tsk->sk);
tsk              2886 net/tipc/socket.c 	struct tipc_group *grp = tsk->group;
tsk              2887 net/tipc/socket.c 	struct tipc_msg *hdr = &tsk->phdr;
tsk              2897 net/tipc/socket.c 	grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
tsk              2900 net/tipc/socket.c 	tsk->group = grp;
tsk              2908 net/tipc/socket.c 	rc = tipc_sk_publish(tsk, mreq->scope, &seq);
tsk              2911 net/tipc/socket.c 		tsk->group = NULL;
tsk              2915 net/tipc/socket.c 	tsk->mc_method.rcast = true;
tsk              2916 net/tipc/socket.c 	tsk->mc_method.mandatory = true;
tsk              2917 net/tipc/socket.c 	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
tsk              2921 net/tipc/socket.c static int tipc_sk_leave(struct tipc_sock *tsk)
tsk              2923 net/tipc/socket.c 	struct net *net = sock_net(&tsk->sk);
tsk              2924 net/tipc/socket.c 	struct tipc_group *grp = tsk->group;
tsk              2932 net/tipc/socket.c 	tsk->group = NULL;
tsk              2933 net/tipc/socket.c 	tipc_sk_withdraw(tsk, scope, &seq);
tsk              2954 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              2989 net/tipc/socket.c 		res = tsk_set_importance(tsk, value);
tsk              2993 net/tipc/socket.c 			tsk_set_unreliable(tsk, value);
tsk              2998 net/tipc/socket.c 		tsk_set_unreturnable(tsk, value);
tsk              3004 net/tipc/socket.c 		tsk->mc_method.rcast = false;
tsk              3005 net/tipc/socket.c 		tsk->mc_method.mandatory = true;
tsk              3008 net/tipc/socket.c 		tsk->mc_method.rcast = true;
tsk              3009 net/tipc/socket.c 		tsk->mc_method.mandatory = true;
tsk              3012 net/tipc/socket.c 		res = tipc_sk_join(tsk, &mreq);
tsk              3015 net/tipc/socket.c 		res = tipc_sk_leave(tsk);
tsk              3043 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
tsk              3061 net/tipc/socket.c 		value = tsk_importance(tsk);
tsk              3064 net/tipc/socket.c 		value = tsk_unreliable(tsk);
tsk              3067 net/tipc/socket.c 		value = tsk_unreturnable(tsk);
tsk              3070 net/tipc/socket.c 		value = tsk->conn_timeout;
tsk              3084 net/tipc/socket.c 		if (tsk->group)
tsk              3085 net/tipc/socket.c 			tipc_group_self(tsk->group, &seq, &scope);
tsk              3273 net/tipc/socket.c static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
tsk              3279 net/tipc/socket.c 	peer_node = tsk_peer_node(tsk);
tsk              3280 net/tipc/socket.c 	peer_port = tsk_peer_port(tsk);
tsk              3291 net/tipc/socket.c 	if (tsk->conn_type != 0) {
tsk              3294 net/tipc/socket.c 		if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
tsk              3296 net/tipc/socket.c 		if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
tsk              3310 net/tipc/socket.c 			  *tsk)
tsk              3313 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
tsk              3315 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
tsk              3320 net/tipc/socket.c 		if (__tipc_nl_add_sk_con(skb, tsk))
tsk              3322 net/tipc/socket.c 	} else if (!list_empty(&tsk->publications)) {
tsk              3331 net/tipc/socket.c 			    struct tipc_sock *tsk)
tsk              3345 net/tipc/socket.c 	if (__tipc_nl_add_sk_info(skb, tsk))
tsk              3364 net/tipc/socket.c 				       struct tipc_sock *tsk))
tsk              3367 net/tipc/socket.c 	struct tipc_sock *tsk;
tsk              3371 net/tipc/socket.c 	while ((tsk = rhashtable_walk_next(iter)) != NULL) {
tsk              3372 net/tipc/socket.c 		if (IS_ERR(tsk)) {
tsk              3373 net/tipc/socket.c 			err = PTR_ERR(tsk);
tsk              3381 net/tipc/socket.c 		sock_hold(&tsk->sk);
tsk              3383 net/tipc/socket.c 		lock_sock(&tsk->sk);
tsk              3384 net/tipc/socket.c 		err = skb_handler(skb, cb, tsk);
tsk              3386 net/tipc/socket.c 			release_sock(&tsk->sk);
tsk              3387 net/tipc/socket.c 			sock_put(&tsk->sk);
tsk              3390 net/tipc/socket.c 		release_sock(&tsk->sk);
tsk              3392 net/tipc/socket.c 		sock_put(&tsk->sk);
tsk              3435 net/tipc/socket.c 			   struct tipc_sock *tsk, u32 sk_filter_state,
tsk              3438 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
tsk              3450 net/tipc/socket.c 	if (__tipc_nl_add_sk_info(skb, tsk))
tsk              3476 net/tipc/socket.c 	if (tsk->cong_link_cnt &&
tsk              3480 net/tipc/socket.c 	if (tsk_conn_cong(tsk) &&
tsk              3486 net/tipc/socket.c 	if (tsk->group)
tsk              3487 net/tipc/socket.c 		if (tipc_group_fill_sock_diag(tsk->group, skb))
tsk              3550 net/tipc/socket.c 				  struct tipc_sock *tsk, u32 *last_publ)
tsk              3556 net/tipc/socket.c 		list_for_each_entry(p, &tsk->publications, binding_sock) {
tsk              3572 net/tipc/socket.c 		p = list_first_entry(&tsk->publications, struct publication,
tsk              3576 net/tipc/socket.c 	list_for_each_entry_from(p, &tsk->publications, binding_sock) {
tsk              3595 net/tipc/socket.c 	struct tipc_sock *tsk;
tsk              3623 net/tipc/socket.c 	tsk = tipc_sk_lookup(net, tsk_portid);
tsk              3624 net/tipc/socket.c 	if (!tsk)
tsk              3627 net/tipc/socket.c 	lock_sock(&tsk->sk);
tsk              3628 net/tipc/socket.c 	err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
tsk              3631 net/tipc/socket.c 	release_sock(&tsk->sk);
tsk              3632 net/tipc/socket.c 	sock_put(&tsk->sk);
tsk              3653 net/tipc/socket.c 	struct tipc_sock *tsk;
tsk              3661 net/tipc/socket.c 	tsk = tipc_sk(sk);
tsk              3673 net/tipc/socket.c 		return (_port == tsk->portid);
tsk              3678 net/tipc/socket.c 	if (tsk->published) {
tsk              3679 net/tipc/socket.c 		p = list_first_entry_or_null(&tsk->publications,
tsk              3689 net/tipc/socket.c 		type = tsk->conn_type;
tsk              3690 net/tipc/socket.c 		lower = tsk->conn_instance;
tsk              3691 net/tipc/socket.c 		upper = tsk->conn_instance;
tsk              3756 net/tipc/socket.c 	struct tipc_sock *tsk;
tsk              3765 net/tipc/socket.c 	tsk = tipc_sk(sk);
tsk              3770 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
tsk              3771 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
tsk              3774 net/tipc/socket.c 		i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
tsk              3775 net/tipc/socket.c 		i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
tsk              3776 net/tipc/socket.c 		i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
tsk              3777 net/tipc/socket.c 		i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
tsk              3779 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
tsk              3780 net/tipc/socket.c 	if (tsk->published) {
tsk              3781 net/tipc/socket.c 		p = list_first_entry_or_null(&tsk->publications,
tsk              3787 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
tsk              3788 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
tsk              3789 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
tsk              3790 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
tsk              3791 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
tsk              3792 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
tsk              3793 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
tsk              3794 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
tsk                65 net/tipc/socket.h 			   struct tipc_sock *tsk, u32 sk_filter_state,
tsk                70 net/tipc/socket.h 				       struct tipc_sock *tsk));
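
Throughout the net/tipc entries, tipc_sk(sk) converts between the generic struct sock and the protocol-private struct tipc_sock; the `struct sock *sk = &tsk->sk` lines above show the reverse direction is a plain member access. Assuming the usual embedded-sock layout, the forward conversion is the standard container_of() idiom (sketch, not the verbatim kernel definition):

	static inline struct tipc_sock *tipc_sk_sketch(const struct sock *sk)
	{
		return container_of(sk, struct tipc_sock, sk);
	}
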
tsk              1435 net/unix/af_unix.c 	struct sock *tsk;
tsk              1459 net/unix/af_unix.c 	tsk = skb->sk;
tsk              1464 net/unix/af_unix.c 	unix_state_lock(tsk);
tsk              1467 net/unix/af_unix.c 	sock_graft(tsk, newsock);
tsk              1468 net/unix/af_unix.c 	unix_state_unlock(tsk);
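
The af_unix entries above are the accept() handoff: the embryonic peer socket queued by connect() arrives carried in an skb, and is attached to the newly created userspace socket under its own state lock. A condensed sketch with error paths omitted; the wrapper name is illustrative:

	static void unix_accept_graft_sketch(struct sk_buff *skb,
					     struct socket *newsock)
	{
		struct sock *tsk = skb->sk;	/* embryo peer queued by connect() */

		unix_state_lock(tsk);
		sock_graft(tsk, newsock);	/* bind peer sock to the new socket */
		unix_state_unlock(tsk);
	}
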
tsk                18 samples/bpf/test_overhead_kprobe_kern.c 	struct task_struct *tsk;
tsk                24 samples/bpf/test_overhead_kprobe_kern.c 	tsk = (void *)PT_REGS_PARM1(ctx);
tsk                26 samples/bpf/test_overhead_kprobe_kern.c 	pid = _(tsk->pid);
tsk                27 samples/bpf/test_overhead_kprobe_kern.c 	bpf_probe_read(oldcomm, sizeof(oldcomm), &tsk->comm);
tsk                29 samples/bpf/test_overhead_kprobe_kern.c 	signal = _(tsk->signal);
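
The samples/bpf entries above show the classic kprobe pattern: PT_REGS_PARM1() recovers the probed function's first argument as a task_struct pointer, and every dereference of it goes through bpf_probe_read(), either directly or via the sample's _() convenience macro. A trimmed, hedged sketch of that shape; the include set, attach point, and 16-byte comm buffer are assumptions about this sample's era rather than verbatim code:

	#include <linux/ptrace.h>
	#include <uapi/linux/bpf.h>
	#include "bpf_helpers.h"

	/* safe single-field read of kernel memory, as in the sample */
	#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})

	SEC("kprobe/__set_task_comm")
	int prog(struct pt_regs *ctx)
	{
		struct task_struct *tsk = (void *)PT_REGS_PARM1(ctx);
		char oldcomm[16] = {};
		int pid;

		pid = _(tsk->pid);	/* never dereference tsk directly */
		bpf_probe_read(oldcomm, sizeof(oldcomm), &tsk->comm);
		/* the full sample goes on to log pid/oldcomm */
		return 0;
	}
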
tsk               154 security/apparmor/audit.c 			sa->type == LSM_AUDIT_DATA_TASK && sa->u.tsk ?
tsk               155 security/apparmor/audit.c 				    sa->u.tsk : current);
tsk               109 security/apparmor/file.c 	sa.u.tsk = NULL;
tsk               117 security/apparmor/file.c 	sa.u.tsk = NULL;
tsk               164 security/apparmor/include/audit.h 	.u.tsk = NULL,							\
tsk               305 security/lsm_audit.c 		struct task_struct *tsk = a->u.tsk;
tsk               306 security/lsm_audit.c 		if (tsk) {
tsk               307 security/lsm_audit.c 			pid_t pid = task_tgid_nr(tsk);
tsk               309 security/lsm_audit.c 				char comm[sizeof(tsk->comm)];
tsk               312 security/lsm_audit.c 				    memcpy(comm, tsk->comm, sizeof(comm)));
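
The lsm_audit.c entries above are the consumer side of the u.tsk union member: when a record is typed LSM_AUDIT_DATA_TASK, the dumper emits the task's tgid and a safely copied comm. The producer side, mirrored by Smack's smk_ad_setfield_u_tsk() and AppArmor's sa->u.tsk assignments in the listing, looks roughly like this (sketch; the helper name is illustrative):

	static void audit_task_sketch(struct common_audit_data *ad,
				      struct task_struct *t)
	{
		ad->type = LSM_AUDIT_DATA_TASK;
		ad->u.tsk = t;		/* dumped later as pid= and comm= */
	}
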
tsk              1482 security/security.c int security_file_send_sigiotask(struct task_struct *tsk,
tsk              1485 security/security.c 	return call_int_hook(file_send_sigiotask, 0, tsk, fown, sig);
tsk              3824 security/selinux/hooks.c static int selinux_file_send_sigiotask(struct task_struct *tsk,
tsk              3828 security/selinux/hooks.c 	u32 sid = task_sid(tsk);
tsk               465 security/smack/smack.h 	a->a.u.tsk = t;
tsk              1781 security/smack/smack_lsm.c static int smack_file_send_sigiotask(struct task_struct *tsk,
tsk              1786 security/smack/smack_lsm.c 	struct smack_known *tkp = smk_of_task(smack_cred(tsk->cred));
tsk              1804 security/smack/smack_lsm.c 	tcred = __task_cred(tsk);
tsk              1810 security/smack/smack_lsm.c 	smk_ad_setfield_u_tsk(&ad, tsk);
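
The security.c, SELinux, and Smack entries above trace one hook end to end: the core entry point fans out to every registered module via call_int_hook(), and both selinux_file_send_sigiotask() and smack_file_send_sigiotask() implement it by checking the target task's security state. The dispatch shape, restated from the two security/security.c lines in the listing:

	int security_file_send_sigiotask(struct task_struct *tsk,
					 struct fown_struct *fown, int sig)
	{
		/* 0 is the default verdict when no module objects */
		return call_int_hook(file_send_sigiotask, 0, tsk, fown, sig);
	}
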
tsk                48 tools/include/linux/lockdep.h #define task_pid_nr(tsk) ((tsk)->pid)