current 56 arch/alpha/include/asm/cacheflush.h if (current->active_mm == mm) current 194 arch/alpha/include/asm/mmu_context.h struct mm_struct * mm = current->active_mm; \ current 239 arch/alpha/include/asm/mmu_context.h if (tsk != current) current 19 arch/alpha/include/asm/processor.h (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) current 27 arch/alpha/include/asm/processor.h ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2) current 50 arch/alpha/include/asm/processor.h ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp) current 85 arch/alpha/include/asm/tlbflush.h flush_tlb_current(current->active_mm); current 110 arch/alpha/include/asm/tlbflush.h if (mm == current->active_mm) current 122 arch/alpha/include/asm/tlbflush.h if (mm == current->active_mm) current 75 arch/alpha/kernel/osf_sys.c mm = current->mm; current 186 arch/alpha/kernel/osf_sys.c current->comm, flags); current 688 arch/alpha/kernel/osf_sys.c unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size; current 702 arch/alpha/kernel/osf_sys.c if (current->sas_ss_sp && on_sig_stack(usp)) current 708 arch/alpha/kernel/osf_sys.c current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current 709 arch/alpha/kernel/osf_sys.c current->sas_ss_size = SIGSTKSZ; current 881 arch/alpha/kernel/osf_sys.c 0, current); current 1155 arch/alpha/kernel/osf_sys.c task_cputime(current, &utime, &stime); current 1160 arch/alpha/kernel/osf_sys.c r.ru_minflt = current->min_flt; current 1161 arch/alpha/kernel/osf_sys.c r.ru_majflt = current->maj_flt; current 1164 arch/alpha/kernel/osf_sys.c utime_jiffies = nsecs_to_jiffies(current->signal->cutime); current 1165 arch/alpha/kernel/osf_sys.c stime_jiffies = nsecs_to_jiffies(current->signal->cstime); current 1168 arch/alpha/kernel/osf_sys.c r.ru_minflt = current->signal->cmin_flt; current 1169 arch/alpha/kernel/osf_sys.c r.ru_majflt = current->signal->cmaj_flt; current 1306 arch/alpha/kernel/osf_sys.c if (current->personality & ADDR_LIMIT_32BIT) current 1368 arch/alpha/kernel/osf_sys.c if (unlikely(personality(current->personality) == PER_OSF4)) current 1380 arch/alpha/kernel/osf_sys.c if (unlikely(personality(current->personality) == PER_OSF4)) current 50 arch/alpha/kernel/pci-sysfs.c current->comm, sparse ? " sparse" : "", start, start + nr, current 266 arch/alpha/kernel/pci-sysfs.c current->comm, sparse ? " sparse" : "", start, start + nr, current 385 arch/alpha/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 155 arch/alpha/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 221 arch/alpha/kernel/signal.c if (ptrace_cancel_bpt (current)) { current 223 arch/alpha/kernel/signal.c current); current 249 arch/alpha/kernel/signal.c if (ptrace_cancel_bpt (current)) { current 251 arch/alpha/kernel/signal.c current); current 367 arch/alpha/kernel/signal.c current->comm, current->pid, frame, regs->pc, regs->r26); current 421 arch/alpha/kernel/signal.c current->comm, current->pid, frame, regs->pc, regs->r26); current 485 arch/alpha/kernel/signal.c unsigned long single_stepping = ptrace_cancel_bpt(current); current 491 arch/alpha/kernel/signal.c single_stepping |= ptrace_cancel_bpt(current); current 497 arch/alpha/kernel/signal.c single_stepping |= ptrace_cancel_bpt(current); current 518 arch/alpha/kernel/signal.c ptrace_set_bpt(current); /* re-set breakpoint */ current 149 arch/alpha/kernel/smp.c current->active_mm = &init_mm; current 169 arch/alpha/kernel/smp.c cpuid, current, current->active_mm)); current 641 arch/alpha/kernel/smp.c if (mm == current->active_mm && !asn_locked()) current 652 arch/alpha/kernel/smp.c if (mm == current->active_mm) { current 685 arch/alpha/kernel/smp.c if (mm == current->active_mm && !asn_locked()) current 699 arch/alpha/kernel/smp.c if (mm == current->active_mm) { current 736 arch/alpha/kernel/smp.c if (mm == current->active_mm && !asn_locked()) current 753 arch/alpha/kernel/smp.c if (mm == current->active_mm) { current 184 arch/alpha/kernel/traps.c printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err); current 230 arch/alpha/kernel/traps.c send_sig_fault(SIGFPE, si_code, (void __user *) regs->pc, 0, current); current 267 arch/alpha/kernel/traps.c if (ptrace_cancel_bpt(current)) { current 272 arch/alpha/kernel/traps.c current); current 277 arch/alpha/kernel/traps.c current); current 339 arch/alpha/kernel/traps.c current); current 365 arch/alpha/kernel/traps.c current); current 390 arch/alpha/kernel/traps.c send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0, current); current 602 arch/alpha/kernel/traps.c current->comm, task_pid_nr(current)); current 738 arch/alpha/kernel/traps.c current->comm, task_pid_nr(current), current 959 arch/alpha/kernel/traps.c struct mm_struct *mm = current->mm; current 967 arch/alpha/kernel/traps.c send_sig_fault(SIGSEGV, si_code, va, 0, current); current 972 arch/alpha/kernel/traps.c send_sig_fault(SIGBUS, BUS_ADRALN, va, 0, current); current 88 arch/alpha/mm/fault.c struct mm_struct *mm = current->mm; current 153 arch/alpha/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 168 arch/alpha/mm/fault.c current->maj_flt++; current 170 arch/alpha/mm/fault.c current->min_flt++; current 243 arch/alpha/mm/fault.c pgd = current->active_mm->pgd + index; current 145 arch/arc/include/asm/entry-arcv2.h ; - K mode: add the offset from current SP where H/w starts auto push current 60 arch/arc/kernel/kgdb.c current->thread.callee_reg); current 66 arch/arc/kernel/kgdb.c current->thread.callee_reg); current 114 arch/arc/kernel/kgdb.c current->thread.callee_reg, current 171 arch/arc/kernel/kprobes.c (struct callee_regs *) current->thread.callee_reg, current 406 arch/arc/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags); current 422 arch/arc/kernel/kprobes.c if (ri->task != current) current 445 arch/arc/kernel/kprobes.c kretprobe_hash_unlock(current, &flags); current 25 arch/arc/kernel/process.c task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr; current 41 arch/arc/kernel/process.c return task_thread_info(current)->thr_ptr; current 91 arch/arc/kernel/process.c down_read(¤t->mm->mmap_sem); current 92 arch/arc/kernel/process.c ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr, current 94 arch/arc/kernel/process.c up_read(¤t->mm->mmap_sem); current 238 arch/arc/kernel/process.c task_thread_info(current)->thr_ptr; current 158 arch/arc/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 176 arch/arc/kernel/smp.c current->active_mm = mm; current 51 arch/arc/kernel/stacktrace.c frame_info->task = current; current 46 arch/arc/kernel/traps.c struct task_struct *tsk = current; current 87 arch/arc/kernel/troubleshoot.c struct mm_struct *active_mm = current->active_mm; current 126 arch/arc/kernel/troubleshoot.c address = current->thread.fault_address; current 180 arch/arc/kernel/troubleshoot.c struct task_struct *tsk = current; current 195 arch/arc/kernel/troubleshoot.c current->thread.fault_address, current 228 arch/arc/kernel/troubleshoot.c cregs = (struct callee_regs *)current->thread.callee_reg; current 238 arch/arc/kernel/troubleshoot.c current->thread.fault_address = address; current 248 arch/arc/kernel/troubleshoot.c show_stacktrace(current, regs); current 214 arch/arc/kernel/unaligned.c get_task_comm(buf, current), task_pid_nr(current)); current 218 arch/arc/kernel/unaligned.c get_task_comm(buf, current), task_pid_nr(current), current 36 arch/arc/mm/fault.c pgd = pgd_offset_fast(current->active_mm, address); current 64 arch/arc/mm/fault.c struct task_struct *tsk = current; current 140 arch/arc/mm/fault.c if (fatal_signal_pending(current)) { current 32 arch/arc/mm/mmap.c struct mm_struct *mm = current->mm; current 312 arch/arc/mm/tlb.c if (current->mm == mm) current 547 arch/arc/mm/tlb.c if (current->active_mm != vma->vm_mm) current 278 arch/arm/common/bL_switcher.c sched_setscheduler_nocheck(current, SCHED_FIFO, ¶m); current 282 arch/arm/common/bL_switcher.c if (signal_pending(current)) current 283 arch/arm/common/bL_switcher.c flush_signals(current); current 144 arch/arm/include/asm/elf.h (elf_addr_t)current->mm->context.vdso); \ current 74 arch/arm/include/asm/mmu_context.h struct mm_struct *mm = current->mm; current 165 arch/arm/include/asm/page.h (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ current 19 arch/arm/include/asm/processor.h #define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \ current 62 arch/arm/include/asm/processor.h current->personality & FDPIC_FUNCPTRS) { \ current 66 arch/arm/include/asm/processor.h regs->ARM_r10 = current->mm->start_data; \ current 68 arch/arm/include/asm/processor.h regs->ARM_r10 = current->mm->start_data; \ current 69 arch/arm/include/asm/processor.h if (current->personality & ADDR_LIMIT_32BIT) \ current 39 arch/arm/include/asm/stackprotector.h current->stack_canary = canary; current 41 arch/arm/include/asm/stackprotector.h __stack_chk_guard = current->stack_canary; current 43 arch/arm/include/asm/stackprotector.h current_thread_info()->stack_canary = current->stack_canary; current 46 arch/arm/kernel/elf.c unsigned int personality = current->personality & ~PER_MASK; current 188 arch/arm/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 74 arch/arm/kernel/perf_callchain.c if (!current->mm) current 38 arch/arm/kernel/perf_regs.c regs_user->regs = task_pt_regs(current); current 39 arch/arm/kernel/perf_regs.c regs_user->abi = perf_reg_abi(current); current 207 arch/arm/kernel/process.c struct task_struct *tsk = current; current 303 arch/arm/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 405 arch/arm/kernel/process.c current->mm->context.sigpage = new_vma->vm_start; current 417 arch/arm/kernel/process.c struct mm_struct *mm = current->mm; current 386 arch/arm/kernel/ptrace.c if (current->thread.debug.hbp[i] == bp) current 229 arch/arm/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 259 arch/arm/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 313 arch/arm/kernel/signal.c .trap_no = current->thread.trap_no, current 314 arch/arm/kernel/signal.c .error_code = current->thread.error_code, current 315 arch/arm/kernel/signal.c .fault_address = current->thread.address, current 371 arch/arm/kernel/signal.c (current->personality & FDPIC_FUNCPTRS); current 453 arch/arm/kernel/signal.c struct mm_struct *mm = current->mm; current 361 arch/arm/kernel/smp.c : "r" (task_stack_page(current) + THREAD_SIZE - 8)); current 397 arch/arm/kernel/smp.c enter_lazy_tlb(mm, current); current 406 arch/arm/kernel/smp.c current->active_mm = mm; current 112 arch/arm/kernel/stacktrace.c if (tsk != current) { current 163 arch/arm/kernel/stacktrace.c __save_stack_trace(current, trace, 0); current 22 arch/arm/kernel/suspend.c struct mm_struct *mm = current->active_mm; current 100 arch/arm/kernel/swp_emulate.c down_read(¤t->mm->mmap_sem); current 101 arch/arm/kernel/swp_emulate.c if (find_vma(current->mm, addr) == NULL) current 105 arch/arm/kernel/swp_emulate.c up_read(¤t->mm->mmap_sem); current 138 arch/arm/kernel/swp_emulate.c if (likely(res != -EAGAIN) || signal_pending(current)) current 181 arch/arm/kernel/swp_emulate.c if (current->pid != previous_pid) { current 183 arch/arm/kernel/swp_emulate.c current->comm, (unsigned long)current->pid); current 184 arch/arm/kernel/swp_emulate.c previous_pid = current->pid; current 215 arch/arm/kernel/traps.c tsk = current; current 220 arch/arm/kernel/traps.c } else if (tsk != current) { current 267 arch/arm/kernel/traps.c struct task_struct *tsk = current; current 323 arch/arm/kernel/traps.c if (regs && kexec_should_crash(current)) current 369 arch/arm/kernel/traps.c current->thread.error_code = err; current 370 arch/arm/kernel/traps.c current->thread.trap_no = trap; current 479 arch/arm/kernel/traps.c current->comm, task_pid_nr(current), pc); current 533 arch/arm/kernel/traps.c if ((current->personality & PER_MASK) != PER_LINUX) { current 534 arch/arm/kernel/traps.c send_sig(SIGSEGV, current, 1); current 541 arch/arm/kernel/traps.c task_pid_nr(current), current->comm, n); current 562 arch/arm/kernel/traps.c if (fatal_signal_pending(current)) current 661 arch/arm/kernel/traps.c task_pid_nr(current), current->comm, no); current 727 arch/arm/kernel/traps.c task_pid_nr(current), current->comm, code, instr); current 729 arch/arm/kernel/traps.c show_pte(KERN_ERR, current->mm, addr); current 465 arch/arm/kernel/unwind.c tsk = current; current 472 arch/arm/kernel/unwind.c } else if (tsk == current) { current 60 arch/arm/kernel/vdso.c current->mm->context.vdso = new_vma->vm_start; current 32 arch/arm/lib/uaccess_with_memcpy.c pgd = pgd_offset(current->mm, addr); current 55 arch/arm/lib/uaccess_with_memcpy.c ptl = ¤t->mm->page_table_lock; current 71 arch/arm/lib/uaccess_with_memcpy.c pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl); current 99 arch/arm/lib/uaccess_with_memcpy.c down_read(¤t->mm->mmap_sem); current 107 arch/arm/lib/uaccess_with_memcpy.c up_read(¤t->mm->mmap_sem); current 111 arch/arm/lib/uaccess_with_memcpy.c down_read(¤t->mm->mmap_sem); current 131 arch/arm/lib/uaccess_with_memcpy.c up_read(¤t->mm->mmap_sem); current 168 arch/arm/lib/uaccess_with_memcpy.c down_read(¤t->mm->mmap_sem); current 175 arch/arm/lib/uaccess_with_memcpy.c up_read(¤t->mm->mmap_sem); current 178 arch/arm/lib/uaccess_with_memcpy.c down_read(¤t->mm->mmap_sem); current 196 arch/arm/lib/uaccess_with_memcpy.c up_read(¤t->mm->mmap_sem); current 248 arch/arm/mach-rpc/ecard.c struct mm_struct *active_mm = current->active_mm; current 253 arch/arm/mach-rpc/ecard.c current->mm = mm; current 254 arch/arm/mach-rpc/ecard.c current->active_mm = mm; current 968 arch/arm/mm/alignment.c "Address=0x%08lx FSR 0x%03x\n", current->comm, current 969 arch/arm/mm/alignment.c task_pid_nr(current), instrptr, current 138 arch/arm/mm/fault.c struct task_struct *tsk = current; current 168 arch/arm/mm/fault.c struct task_struct *tsk = current; current 249 arch/arm/mm/fault.c tsk = current; current 298 arch/arm/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { current 539 arch/arm/mm/fault.c show_pte(KERN_ALERT, current->mm, addr); current 239 arch/arm/mm/flush.c struct mm_struct *mm = current->active_mm; current 639 arch/arm/mm/init.c set_section_perms(perms, n, true, current->active_mm); current 675 arch/arm/mm/init.c current->active_mm); current 684 arch/arm/mm/init.c current->active_mm); current 181 arch/arm/mm/ioremap.c if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq) current 182 arch/arm/mm/ioremap.c __check_vmalloc_seq(current->active_mm); current 33 arch/arm/mm/mmap.c struct mm_struct *mm = current->mm; current 86 arch/arm/mm/mmap.c struct mm_struct *mm = current->mm; current 139 arch/arm/nwfpe/fpmodule.c current->comm, current->pid, flags, current 166 arch/arm/nwfpe/fpmodule.c fp_send_sig(SIGFPE, current, 1); current 424 arch/arm/probes/kprobes/core.c kretprobe_hash_lock(current, &head, &flags); current 440 arch/arm/probes/kprobes/core.c if (ri->task != current) current 459 arch/arm/probes/kprobes/core.c if (ri->task != current) current 483 arch/arm/probes/kprobes/core.c kretprobe_hash_unlock(current, &flags); current 135 arch/arm/probes/uprobes/core.c struct uprobe_task *utask = current->utask; current 140 arch/arm/probes/uprobes/core.c utask->autask.saved_trap_no = current->thread.trap_no; current 141 arch/arm/probes/uprobes/core.c current->thread.trap_no = UPROBE_TRAP_NR; current 149 arch/arm/probes/uprobes/core.c struct uprobe_task *utask = current->utask; current 151 arch/arm/probes/uprobes/core.c WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR); current 153 arch/arm/probes/uprobes/core.c current->thread.trap_no = utask->autask.saved_trap_no; current 172 arch/arm/probes/uprobes/core.c struct uprobe_task *utask = current->utask; current 174 arch/arm/probes/uprobes/core.c current->thread.trap_no = utask->autask.saved_trap_no; current 220 arch/arm/vfp/vfpmodule.c current->thread.error_code = 0; current 221 arch/arm/vfp/vfpmodule.c current->thread.trap_no = 6; current 225 arch/arm/vfp/vfpmodule.c current); current 134 arch/arm64/include/asm/compat.h #define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current))) current 141 arch/arm64/include/asm/efi.h if (mm != current->active_mm) { current 149 arch/arm64/include/asm/efi.h update_saved_ttbr0(current, mm); current 159 arch/arm64/include/asm/efi.h update_saved_ttbr0(current, current->active_mm); current 139 arch/arm64/include/asm/elf.h current->personality &= ~READ_IMPLIES_EXEC; \ current 146 arch/arm64/include/asm/elf.h (elf_addr_t)current->mm->context.vdso); \ current 215 arch/arm64/include/asm/elf.h (Elf64_Off)current->mm->context.vdso); \ current 115 arch/arm64/include/asm/mmu_context.h struct mm_struct *mm = current->active_mm; current 246 arch/arm64/include/asm/mmu_context.h #define activate_mm(prev,next) switch_mm(prev, next, current) current 36 arch/arm64/include/asm/page.h (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ current 255 arch/arm64/include/asm/pgtable.h if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1) current 322 arch/arm64/include/asm/processor.h BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \ current 325 arch/arm64/include/asm/processor.h #define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL)) current 36 arch/arm64/include/asm/stackprotector.h current->stack_canary = canary; current 38 arch/arm64/include/asm/stackprotector.h __stack_chk_guard = current->stack_canary; current 150 arch/arm64/include/asm/stacktrace.h if (tsk != current || preemptible()) current 71 arch/arm64/include/asm/uaccess.h (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR))) current 338 arch/arm64/kernel/armv8_deprecated.c if (likely(res != -EAGAIN) || signal_pending(current)) current 426 arch/arm64/kernel/armv8_deprecated.c current->comm, (unsigned long)current->pid, regs->pc); current 507 arch/arm64/kernel/armv8_deprecated.c current->comm, (unsigned long)current->pid, regs->pc); current 575 arch/arm64/kernel/armv8_deprecated.c current->comm, (unsigned long)current->pid, regs->pc); current 130 arch/arm64/kernel/cpuinfo.c bool compat = personality(current->personality) == PER_LINUX32; current 263 arch/arm64/kernel/debug-monitors.c user_rewind_single_step(current); current 276 arch/arm64/kernel/fpsimd.c sve_load_state(sve_pffr(¤t->thread), current 277 arch/arm64/kernel/fpsimd.c ¤t->thread.uw.fpsimd_state.fpsr, current 278 arch/arm64/kernel/fpsimd.c sve_vq_from_vl(current->thread.sve_vl) - 1); current 280 arch/arm64/kernel/fpsimd.c fpsimd_load_state(¤t->thread.uw.fpsimd_state); current 501 arch/arm64/kernel/fpsimd.c memset(task->thread.sve_state, 0, sve_state_size(current)); current 611 arch/arm64/kernel/fpsimd.c if (task == current) { current 621 arch/arm64/kernel/fpsimd.c if (task == current) current 650 arch/arm64/kernel/fpsimd.c ret = current->thread.sve_vl_onexec; current 652 arch/arm64/kernel/fpsimd.c ret = current->thread.sve_vl; current 672 arch/arm64/kernel/fpsimd.c ret = sve_set_vector_length(current, vl, flags); current 933 arch/arm64/kernel/fpsimd.c sve_alloc(current); current 940 arch/arm64/kernel/fpsimd.c fpsimd_flush_task_state(current); current 942 arch/arm64/kernel/fpsimd.c fpsimd_to_sve(current); current 980 arch/arm64/kernel/fpsimd.c current); current 1019 arch/arm64/kernel/fpsimd.c fpsimd_flush_task_state(current); current 1020 arch/arm64/kernel/fpsimd.c memset(¤t->thread.uw.fpsimd_state, 0, current 1021 arch/arm64/kernel/fpsimd.c sizeof(current->thread.uw.fpsimd_state)); current 1025 arch/arm64/kernel/fpsimd.c sve_free(current); current 1038 arch/arm64/kernel/fpsimd.c vl = current->thread.sve_vl_onexec ? current 1039 arch/arm64/kernel/fpsimd.c current->thread.sve_vl_onexec : sve_default_vl; current 1048 arch/arm64/kernel/fpsimd.c current->thread.sve_vl = vl; current 1055 arch/arm64/kernel/fpsimd.c current->thread.sve_vl_onexec = 0; current 1084 arch/arm64/kernel/fpsimd.c sve_to_fpsimd(current); current 1098 arch/arm64/kernel/fpsimd.c last->st = ¤t->thread.uw.fpsimd_state; current 1099 arch/arm64/kernel/fpsimd.c last->sve_state = current->thread.sve_state; current 1100 arch/arm64/kernel/fpsimd.c last->sve_vl = current->thread.sve_vl; current 1101 arch/arm64/kernel/fpsimd.c current->thread.fpsimd_cpu = smp_processor_id(); current 1171 arch/arm64/kernel/fpsimd.c current->thread.uw.fpsimd_state = *state; current 1173 arch/arm64/kernel/fpsimd.c fpsimd_to_sve(current); current 232 arch/arm64/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 227 arch/arm64/kernel/hw_breakpoint.c struct debug_info *debug_info = ¤t->thread.debug; current 632 arch/arm64/kernel/hw_breakpoint.c debug_info = ¤t->thread.debug; current 677 arch/arm64/kernel/hw_breakpoint.c user_enable_single_step(current); current 746 arch/arm64/kernel/hw_breakpoint.c debug_info = ¤t->thread.debug; current 820 arch/arm64/kernel/hw_breakpoint.c user_enable_single_step(current); current 845 arch/arm64/kernel/hw_breakpoint.c struct debug_info *debug_info = ¤t->thread.debug; current 874 arch/arm64/kernel/hw_breakpoint.c user_disable_single_step(current); current 914 arch/arm64/kernel/hw_breakpoint.c current_debug_info = ¤t->thread.debug; current 158 arch/arm64/kernel/perf_callchain.c walk_stackframe(current, &frame, callchain_trace, entry); current 60 arch/arm64/kernel/perf_regs.c regs_user->regs = task_pt_regs(current); current 61 arch/arm64/kernel/perf_regs.c regs_user->abi = perf_reg_abi(current); current 486 arch/arm64/kernel/probes/kprobes.c kretprobe_hash_lock(current, &head, &flags); current 502 arch/arm64/kernel/probes/kprobes.c if (ri->task != current) current 521 arch/arm64/kernel/probes/kprobes.c if (ri->task != current) current 545 arch/arm64/kernel/probes/kprobes.c kretprobe_hash_unlock(current, &flags); current 64 arch/arm64/kernel/probes/uprobes.c struct uprobe_task *utask = current->utask; current 67 arch/arm64/kernel/probes/uprobes.c current->thread.fault_code = UPROBE_INV_FAULT_CODE; current 72 arch/arm64/kernel/probes/uprobes.c user_enable_single_step(current); current 79 arch/arm64/kernel/probes/uprobes.c struct uprobe_task *utask = current->utask; current 81 arch/arm64/kernel/probes/uprobes.c WARN_ON_ONCE(current->thread.fault_code != UPROBE_INV_FAULT_CODE); current 86 arch/arm64/kernel/probes/uprobes.c user_disable_single_step(current); current 122 arch/arm64/kernel/probes/uprobes.c struct uprobe_task *utask = current->utask; current 130 arch/arm64/kernel/probes/uprobes.c user_disable_single_step(current); current 180 arch/arm64/kernel/probes/uprobes.c struct uprobe_task *utask = current->utask; current 302 arch/arm64/kernel/process.c current->thread.uw.tp_value = 0; current 324 arch/arm64/kernel/process.c flush_ptrace_hw_breakpoint(current); current 339 arch/arm64/kernel/process.c if (current->mm) current 428 arch/arm64/kernel/process.c *task_user_tls(current) = read_sysreg(tpidr_el0); current 537 arch/arm64/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 562 arch/arm64/kernel/process.c if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current 572 arch/arm64/kernel/process.c current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0; current 574 arch/arm64/kernel/process.c ptrauth_thread_init_user(current); current 183 arch/arm64/kernel/ptrace.c if (current->thread.debug.hbp_break[i] == bp) { current 190 arch/arm64/kernel/ptrace.c if (current->thread.debug.hbp_watch[i] == bp) { current 650 arch/arm64/kernel/ptrace.c if (target == current) current 708 arch/arm64/kernel/ptrace.c if (target == current) current 818 arch/arm64/kernel/ptrace.c if (target == current) current 1370 arch/arm64/kernel/ptrace.c if (target == current) current 46 arch/arm64/kernel/return_address.c walk_stackframe(current, &frame, save_return_addr, &data); current 173 arch/arm64/kernel/signal.c ¤t->thread.uw.fpsimd_state; current 229 arch/arm64/kernel/signal.c unsigned int vl = current->thread.sve_vl; current 250 arch/arm64/kernel/signal.c current->thread.sve_state, current 267 arch/arm64/kernel/signal.c if (sve.vl != current->thread.sve_vl) current 287 arch/arm64/kernel/signal.c fpsimd_flush_task_state(current); current 290 arch/arm64/kernel/signal.c sve_alloc(current); current 291 arch/arm64/kernel/signal.c err = __copy_from_user(current->thread.sve_state, current 505 arch/arm64/kernel/signal.c err |= !valid_user_regs(®s->user_regs, current); current 532 arch/arm64/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 577 arch/arm64/kernel/signal.c if (add_all || current->thread.fault_code) { current 591 arch/arm64/kernel/signal.c vl = current->thread.sve_vl; current 622 arch/arm64/kernel/signal.c __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); current 639 arch/arm64/kernel/signal.c __put_user_error(current->thread.fault_code, &esr_ctx->esr, err); current 736 arch/arm64/kernel/signal.c sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp); current 785 arch/arm64/kernel/signal.c struct task_struct *tsk = current; current 807 arch/arm64/kernel/signal.c ret |= !valid_user_regs(®s->user_regs, current); current 893 arch/arm64/kernel/signal.c user_rewind_single_step(current); current 97 arch/arm64/kernel/signal32.c ¤t->thread.uw.fpsimd_state; current 223 arch/arm64/kernel/signal32.c err |= !valid_user_regs(®s->user_regs, current); current 238 arch/arm64/kernel/signal32.c current->restart_block.fn = do_no_restart_syscall; current 269 arch/arm64/kernel/signal32.c current->restart_block.fn = do_no_restart_syscall; current 346 arch/arm64/kernel/signal32.c void *vdso_base = current->mm->context.vdso; current 374 arch/arm64/kernel/signal32.c retcode = (unsigned long)current->mm->context.vdso + current 413 arch/arm64/kernel/signal32.c __put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) << current 415 arch/arm64/kernel/signal32.c __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); current 199 arch/arm64/kernel/smp.c cpu = task_cpu(current); current 207 arch/arm64/kernel/smp.c current->active_mm = mm; current 50 arch/arm64/kernel/stacktrace.c tsk = current; current 167 arch/arm64/kernel/stacktrace.c walk_stackframe(current, &frame, save_trace, &data); current 184 arch/arm64/kernel/stacktrace.c if (tsk != current) { current 208 arch/arm64/kernel/stacktrace.c __save_stack_trace(current, trace, 0); current 60 arch/arm64/kernel/suspend.c uao_thread_switch(current); current 32 arch/arm64/kernel/sys_compat.c if (fatal_signal_pending(current)) current 92 arch/arm64/kernel/sys_compat.c current->thread.uw.tp_value = regs->regs[0]; current 99 arch/arm64/kernel/traps.c tsk = current; current 104 arch/arm64/kernel/traps.c if (tsk == current) { current 190 arch/arm64/kernel/traps.c if (regs && kexec_should_crash(current)) current 212 arch/arm64/kernel/traps.c struct task_struct *tsk = current; current 262 arch/arm64/kernel/traps.c current->thread.fault_address = 0; current 263 arch/arm64/kernel/traps.c current->thread.fault_code = err; current 280 arch/arm64/kernel/traps.c user_fastforward_single_step(current); current 386 arch/arm64/kernel/traps.c down_read(¤t->mm->mmap_sem); current 387 arch/arm64/kernel/traps.c if (find_vma(current->mm, addr) == NULL) current 391 arch/arm64/kernel/traps.c up_read(¤t->mm->mmap_sem); current 809 arch/arm64/kernel/traps.c current->thread.fault_address = 0; current 810 arch/arm64/kernel/traps.c current->thread.fault_code = esr; current 823 arch/arm64/kernel/traps.c unsigned long tsk_stk = (unsigned long)current->stack; current 95 arch/arm64/kernel/vdso.c current->mm->context.vdso = (void *)new_vma->vm_start; current 346 arch/arm64/kernel/vdso.c struct mm_struct *mm = current->mm; current 407 arch/arm64/kernel/vdso.c struct mm_struct *mm = current->mm; current 31 arch/arm64/kvm/fpsimd.c struct thread_info *ti = ¤t->thread_info; current 32 arch/arm64/kvm/fpsimd.c struct user_fpsimd_state *fpsimd = ¤t->thread.uw.fpsimd_state; current 64 arch/arm64/kvm/fpsimd.c BUG_ON(!current->mm); current 136 arch/arm64/mm/fault.c mm = current->active_mm; current 334 arch/arm64/mm/fault.c current->thread.fault_address = address; current 348 arch/arm64/mm/fault.c if (!is_ttbr0_addr(current->thread.fault_address)) { current 386 arch/arm64/mm/fault.c current->thread.fault_code = esr; current 455 arch/arm64/mm/fault.c struct mm_struct *mm = current->mm; current 532 arch/arm64/mm/fault.c if (fatal_signal_pending(current)) { current 562 arch/arm64/mm/fault.c current->maj_flt++; current 566 arch/arm64/mm/fault.c current->min_flt++; current 7 arch/c6x/include/asm/page.h ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ current 23 arch/c6x/include/asm/switch_to.h current->thread.wchan = (u_long) __builtin_return_address(0); \ current 27 arch/c6x/include/asm/switch_to.h current->thread.wchan = 0; \ current 101 arch/c6x/kernel/process.c current->thread.usp = usp; current 68 arch/c6x/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 31 arch/c6x/kernel/sys_c6x.c current->pid, addr, size); current 75 arch/c6x/kernel/traps.c current->comm, current->pid, (PAGE_SIZE + current 76 arch/c6x/kernel/traps.c (unsigned long) current)); current 382 arch/c6x/kernel/traps.c if (task && task != current) current 28 arch/csky/abiv1/mmap.c struct mm_struct *mm = current->mm; current 27 arch/csky/include/asm/mmu_context.h #define activate_mm(prev,next) switch_mm(prev, next, current) current 145 arch/csky/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 15 arch/csky/kernel/perf_callchain.c unsigned long low = (unsigned long)task_stack_page(current); current 38 arch/csky/kernel/perf_regs.c regs_user->regs = task_pt_regs(current); current 39 arch/csky/kernel/perf_regs.c regs_user->abi = perf_reg_abi(current); current 80 arch/csky/kernel/process.c memcpy(fpu, ¤t->thread.user_fp, sizeof(*fpu)); current 101 arch/csky/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 219 arch/csky/kernel/ptrace.c syscall_set_nr(current, regs, -1); current 222 arch/csky/kernel/ptrace.c trace_sys_enter(regs, syscall_get_nr(current, regs)); current 235 arch/csky/kernel/ptrace.c trace_sys_exit(regs, syscall_get_return_value(current, regs)); current 246 arch/csky/kernel/ptrace.c pr_info("COMM=%s PID=%d\n", current->comm, current->pid); current 248 arch/csky/kernel/ptrace.c if (current->mm) { current 250 arch/csky/kernel/ptrace.c (int) current->mm->start_code, current 251 arch/csky/kernel/ptrace.c (int) current->mm->end_code, current 252 arch/csky/kernel/ptrace.c (int) current->mm->start_data, current 253 arch/csky/kernel/ptrace.c (int) current->mm->end_data, current 254 arch/csky/kernel/ptrace.c (int) current->mm->end_data, current 255 arch/csky/kernel/ptrace.c (int) current->mm->brk); current 257 arch/csky/kernel/ptrace.c (int) current->mm->start_stack, current 258 arch/csky/kernel/ptrace.c (int) (((unsigned long) current) + 2 * PAGE_SIZE)); current 72 arch/csky/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 137 arch/csky/kernel/signal.c struct csky_vdso *vdso = current->mm->context.vdso; current 229 arch/csky/kernel/smp.c current->active_mm = mm; current 11 arch/csky/kernel/stacktrace.c save_stack_trace_tsk(current, trace); current 23 arch/csky/kernel/stacktrace.c if (tsk == current) { current 8 arch/csky/kernel/syscall.c struct thread_info *ti = task_thread_info(current); current 118 arch/csky/kernel/traps.c struct task_struct *tsk = current; current 176 arch/csky/kernel/traps.c send_sig(sig, current, 0); current 51 arch/csky/kernel/vdso.c struct mm_struct *mm = current->mm; current 50 arch/csky/mm/fault.c struct task_struct *tsk = current; current 84 arch/h8300/include/asm/processor.h (_regs)->er5 = current->mm->start_data; /* GOT base */ \ current 94 arch/h8300/include/asm/processor.h (_regs)->er5 = current->mm->start_data; /* GOT base */ \ current 118 arch/h8300/include/asm/processor.h #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) current 29 arch/h8300/include/asm/ptrace.h #define signal_pt_regs() ((struct pt_regs *)current->thread.esp0) current 139 arch/h8300/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 251 arch/h8300/kernel/ptrace_h.c if ((unsigned long)current->thread.breakinfo.addr == bp) { current 252 arch/h8300/kernel/ptrace_h.c user_disable_single_step(current); current 83 arch/h8300/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 205 arch/h8300/kernel/signal.c regs->er5 = current->mm->start_data; /* GOT base */ current 266 arch/h8300/kernel/signal.c current->thread.esp0 = (unsigned long) regs; current 48 arch/h8300/kernel/traps.c current->thread.esp0 = ssp; current 62 arch/h8300/kernel/traps.c pr_info("COMM=%s PID=%d\n", current->comm, current->pid); current 63 arch/h8300/kernel/traps.c if (current->mm) { current 65 arch/h8300/kernel/traps.c (int) current->mm->start_code, current 66 arch/h8300/kernel/traps.c (int) current->mm->end_code, current 67 arch/h8300/kernel/traps.c (int) current->mm->start_data, current 68 arch/h8300/kernel/traps.c (int) current->mm->end_data, current 69 arch/h8300/kernel/traps.c (int) current->mm->end_data, current 70 arch/h8300/kernel/traps.c (int) current->mm->brk); current 72 arch/h8300/kernel/traps.c (int) current->mm->start_stack, current 73 arch/h8300/kernel/traps.c (int) PAGE_SIZE+(unsigned long)current); current 94 arch/h8300/kernel/traps.c if (STACK_MAGIC != *(unsigned long *)((unsigned long)current+PAGE_SIZE)) current 79 arch/hexagon/include/asm/pgalloc.h current->active_mm->context.generation = kmap_generation; current 89 arch/hexagon/include/asm/pgalloc.h ppmd = (pmd_t *)current->active_mm->pgd + pmdindex; current 138 arch/hexagon/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 101 arch/hexagon/kernel/signal.c struct hexagon_vdso *vdso = current->mm->context.vdso; current 231 arch/hexagon/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 154 arch/hexagon/kernel/smp.c current->active_mm = &init_mm; current 29 arch/hexagon/kernel/stacktrace.c low = (unsigned long)task_stack_page(current); current 94 arch/hexagon/kernel/traps.c task = current; current 101 arch/hexagon/kernel/traps.c if (task == current) { current 210 arch/hexagon/kernel/traps.c do_show_stack(current, ®s->r30, pt_elr(regs)); current 53 arch/hexagon/kernel/vdso.c struct mm_struct *mm = current->mm; current 39 arch/hexagon/mm/vm_fault.c struct mm_struct *mm = current->mm; current 94 arch/hexagon/mm/vm_fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 101 arch/hexagon/mm/vm_fault.c current->maj_flt++; current 103 arch/hexagon/mm/vm_fault.c current->min_flt++; current 30 arch/hexagon/mm/vm_tlb.c if (mm->context.ptbase == current->active_mm->context.ptbase) current 59 arch/hexagon/mm/vm_tlb.c if (current->active_mm->context.ptbase == mm->context.ptbase) current 70 arch/hexagon/mm/vm_tlb.c if (mm->context.ptbase == current->active_mm->context.ptbase) current 223 arch/ia64/include/asm/page.h (((current->personality & READ_IMPLIES_EXEC) != 0) \ current 41 arch/ia64/include/asm/processor.h #define TASK_UNMAPPED_BASE (current->thread.map_base) current 316 arch/ia64/include/asm/processor.h regs->ar_bspstore = current->thread.rbs_bot; \ current 319 arch/ia64/include/asm/processor.h regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \ current 321 arch/ia64/include/asm/processor.h if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \ current 112 arch/ia64/include/asm/ptrace.h # define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0) current 69 arch/ia64/include/asm/switch_to.h if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \ current 70 arch/ia64/include/asm/switch_to.h (task_cpu(current) != \ current 71 arch/ia64/include/asm/switch_to.h task_thread_info(current)->last_cpu))) { \ current 72 arch/ia64/include/asm/switch_to.h task_thread_info(current)->last_cpu = task_cpu(current); \ current 57 arch/ia64/include/asm/thread_info.h #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) current 67 arch/ia64/include/asm/tlbflush.h if (mm == current->active_mm) current 106 arch/ia64/include/asm/tlbflush.h if (vma->vm_mm == current->active_mm) current 10 arch/ia64/include/uapi/asm/fcntl.h (personality(current->personality) != PER_LINUX32) current 46 arch/ia64/kernel/crash.c prstatus->pr_pid = current->pid; current 144 arch/ia64/kernel/crash.c current->thread.ksp = (__u64)info->sw - 16; current 417 arch/ia64/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags); current 433 arch/ia64/kernel/kprobes.c if (ri->task != current) current 450 arch/ia64/kernel/kprobes.c if (ri->task != current) current 470 arch/ia64/kernel/kprobes.c kretprobe_hash_unlock(current, &flags); current 95 arch/ia64/kernel/machine_kexec.c current->thread.ksp = (__u64)info->sw - 16; current 875 arch/ia64/kernel/mca.c char *p, comm[sizeof(current->comm)]; current 878 arch/ia64/kernel/mca.c current->comm, previous_current->pid); current 886 arch/ia64/kernel/mca.c current->comm, l, previous_current->comm, current 889 arch/ia64/kernel/mca.c memcpy(current->comm, comm, sizeof(current->comm)); current 989 arch/ia64/kernel/mca.c ia64_set_curr_task(cpu, current); current 990 arch/ia64/kernel/mca.c if ((p = strchr(current->comm, ' '))) current 1034 arch/ia64/kernel/mca.c new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET); current 161 arch/ia64/kernel/mca_drv.c raw_smp_processor_id(), current->pid, current 163 arch/ia64/kernel/mca_drv.c iip, ipsr, paddr, current->comm); current 167 arch/ia64/kernel/perfmon.c DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \ current 169 arch/ia64/kernel/perfmon.c DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \ current 174 arch/ia64/kernel/perfmon.c DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \ current 236 arch/ia64/kernel/perfmon.c if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ current 241 arch/ia64/kernel/perfmon.c if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ current 585 arch/ia64/kernel/perfmon.c if (task != current) put_task_struct(task); current 962 arch/ia64/kernel/perfmon.c if (task != current) { current 963 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current)); current 968 arch/ia64/kernel/perfmon.c task_pid_nr(task), task_pid_nr(current), ctx->ctx_state); current 1405 arch/ia64/kernel/perfmon.c struct task_struct *task = current; current 1451 arch/ia64/kernel/perfmon.c pfm_buf_fmt_exit(fmt, current, NULL, NULL); current 1464 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current)); current 1474 arch/ia64/kernel/perfmon.c pfm_buf_fmt_exit(fmt, current, NULL, NULL); current 1508 arch/ia64/kernel/perfmon.c DECLARE_WAITQUEUE(wait, current); current 1510 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current)); current 1516 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current)); current 1559 arch/ia64/kernel/perfmon.c if(signal_pending(current)) { current 1570 arch/ia64/kernel/perfmon.c DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret)); current 1579 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current)); current 1610 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current)); current 1616 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current)); current 1655 arch/ia64/kernel/perfmon.c task_pid_nr(current), current 1670 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current)); current 1676 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current)); current 1707 arch/ia64/kernel/perfmon.c struct pt_regs *regs = task_pt_regs(current); current 1784 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current)); current 1811 arch/ia64/kernel/perfmon.c task == current ? 1 : 0)); current 1820 arch/ia64/kernel/perfmon.c if (task == current) { current 1873 arch/ia64/kernel/perfmon.c if (ctx->ctx_smpl_vaddr && current->mm) { current 1911 arch/ia64/kernel/perfmon.c DECLARE_WAITQUEUE(wait, current); current 1927 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current)); current 1941 arch/ia64/kernel/perfmon.c task == current ? 1 : 0)); current 2017 arch/ia64/kernel/perfmon.c else if (task != current) { current 2410 arch/ia64/kernel/perfmon.c ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr); current 2525 arch/ia64/kernel/perfmon.c if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) { current 2538 arch/ia64/kernel/perfmon.c if (task == current) return 0; current 2557 arch/ia64/kernel/perfmon.c struct task_struct *p = current; current 2563 arch/ia64/kernel/perfmon.c if (pid != task_pid_vnr(current)) { current 2573 arch/ia64/kernel/perfmon.c } else if (p != current) { current 2592 arch/ia64/kernel/perfmon.c ret = pfarg_is_sane(current, req); current 2620 arch/ia64/kernel/perfmon.c ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req); current 2649 arch/ia64/kernel/perfmon.c pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs); current 3347 arch/ia64/kernel/perfmon.c if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; current 3368 arch/ia64/kernel/perfmon.c if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; current 3495 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current)); current 3499 arch/ia64/kernel/perfmon.c if (task == current || is_system) { current 3848 arch/ia64/kernel/perfmon.c if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; current 3869 arch/ia64/kernel/perfmon.c if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; current 3948 arch/ia64/kernel/perfmon.c if (task == current) { current 4028 arch/ia64/kernel/perfmon.c if (ctx->ctx_task == current) { current 4134 arch/ia64/kernel/perfmon.c if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) { current 4150 arch/ia64/kernel/perfmon.c if (is_system && task != current) { current 4209 arch/ia64/kernel/perfmon.c ret = pfm_reserve_session(current, is_system, the_cpu); current 4264 arch/ia64/kernel/perfmon.c if (task == current) { current 4337 arch/ia64/kernel/perfmon.c if (is_system == 0 && task != current) { current 4409 arch/ia64/kernel/perfmon.c pfm_flush_pmds(current, ctx); current 4436 arch/ia64/kernel/perfmon.c tregs = task == current ? regs : task_pt_regs(task); current 4438 arch/ia64/kernel/perfmon.c if (task == current) { current 4630 arch/ia64/kernel/perfmon.c if (task == current || ctx->ctx_fl_system) return 0; current 4760 arch/ia64/kernel/perfmon.c printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz); current 4832 arch/ia64/kernel/perfmon.c ret = (*func)(ctx, args_k, count, task_pt_regs(current)); current 4875 arch/ia64/kernel/perfmon.c ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); current 4877 arch/ia64/kernel/perfmon.c ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); current 4889 arch/ia64/kernel/perfmon.c if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current); current 4907 arch/ia64/kernel/perfmon.c DPRINT(("entering for [%d]\n", task_pid_nr(current))); current 4911 arch/ia64/kernel/perfmon.c printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret); current 4947 arch/ia64/kernel/perfmon.c ctx = PFM_GET_CTX(current); current 4950 arch/ia64/kernel/perfmon.c task_pid_nr(current)); current 4956 arch/ia64/kernel/perfmon.c PFM_SET_WORK_PENDING(current, 0); current 4958 arch/ia64/kernel/perfmon.c regs = task_pt_regs(current); current 6326 arch/ia64/kernel/perfmon.c regs = task_pt_regs(current); current 6352 arch/ia64/kernel/perfmon.c regs = task_pt_regs(current); current 6617 arch/ia64/kernel/perfmon.c regs = task_pt_regs(current); current 6629 arch/ia64/kernel/perfmon.c task_pid_nr(current), current 6631 arch/ia64/kernel/perfmon.c current->comm); current 160 arch/ia64/kernel/perfmon_default_smpl.c ent->pid = current->pid; current 172 arch/ia64/kernel/perfmon_default_smpl.c ent->tgid = current->tgid; current 166 arch/ia64/kernel/process.c if (fsys_mode(current, &scr->pt)) { current 177 arch/ia64/kernel/process.c if (current->thread.pfm_needs_checking) current 349 arch/ia64/kernel/process.c rbs = (unsigned long) current + IA64_RBS_OFFSET; current 374 arch/ia64/kernel/process.c p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) current 437 arch/ia64/kernel/process.c if (current->thread.pfm_context) current 540 arch/ia64/kernel/process.c do_copy_task_regs(current, info, arg); current 546 arch/ia64/kernel/process.c do_dump_task_fpu(current, info, arg); current 569 arch/ia64/kernel/process.c current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); current 570 arch/ia64/kernel/process.c ia64_drop_fpu(current); current 600 arch/ia64/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 608 arch/ia64/kernel/ptrace.c if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE)) current 610 arch/ia64/kernel/ptrace.c set_notify_resume(current); current 619 arch/ia64/kernel/ptrace.c clear_tsk_thread_flag(current, TIF_RESTORE_RSE); current 1835 arch/ia64/kernel/ptrace.c if (target == current) current 2192 arch/ia64/kernel/ptrace.c if (task == current) current 998 arch/ia64/kernel/setup.c memset(task_pt_regs(current), 0, sizeof(struct pt_regs)); current 1023 arch/ia64/kernel/setup.c current->active_mm = &init_mm; current 1024 arch/ia64/kernel/setup.c BUG_ON(current->mm); current 50 arch/ia64/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 93 arch/ia64/kernel/signal.c err |= __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16); current 97 arch/ia64/kernel/signal.c ia64_drop_fpu(current); current 100 arch/ia64/kernel/signal.c __ia64_load_fpu(current->thread.fph); current 101 arch/ia64/kernel/signal.c ia64_set_local_fpu_owner(current); current 148 arch/ia64/kernel/signal.c current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip); current 179 arch/ia64/kernel/signal.c ia64_flush_fph(current); current 180 arch/ia64/kernel/signal.c if ((current->thread.flags & IA64_THREAD_FPH_VALID)) { current 182 arch/ia64/kernel/signal.c err = __copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16); current 223 arch/ia64/kernel/signal.c return (bsp - current->sas_ss_sp < current->sas_ss_size); current 240 arch/ia64/kernel/signal.c new_sp = current->sas_ss_sp + current->sas_ss_size; current 248 arch/ia64/kernel/signal.c new_rbs = ALIGN(current->sas_ss_sp, current 312 arch/ia64/kernel/signal.c current->comm, current->pid, ksig->sig, scr->pt.r12, frame->sc.sc_ip, frame->handler); current 298 arch/ia64/kernel/smp.c if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1)) current 32 arch/ia64/kernel/sys_ia64.c struct mm_struct *mm = current->mm; current 105 arch/ia64/kernel/sys_ia64.c struct pt_regs *regs = task_pt_regs(current); current 111 arch/ia64/kernel/time.c struct thread_info *ni = task_thread_info(current); current 68 arch/ia64/kernel/traps.c current->comm, task_pid_nr(current), str, err, ++die_counter); current 208 arch/ia64/kernel/traps.c if (ia64_is_local_fpu_owner(current)) { current 217 arch/ia64/kernel/traps.c ia64_set_local_fpu_owner(current); current 218 arch/ia64/kernel/traps.c if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) { current 219 arch/ia64/kernel/traps.c __ia64_load_fpu(current->thread.fph); current 295 arch/ia64/kernel/traps.c if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) { current 323 arch/ia64/kernel/traps.c current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr); current 454 arch/ia64/kernel/traps.c current->comm, task_pid_nr(current), current 505 arch/ia64/kernel/traps.c if (fsys_mode(current, ®s)) { current 551 arch/ia64/kernel/traps.c if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) { current 312 arch/ia64/kernel/unaligned.c unsigned long *kbs = (void *) current + IA64_RBS_OFFSET; current 349 arch/ia64/kernel/unaligned.c if (!user_stack(current, regs)) { current 361 arch/ia64/kernel/unaligned.c ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val); current 365 arch/ia64/kernel/unaligned.c ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats); current 374 arch/ia64/kernel/unaligned.c ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats); current 385 arch/ia64/kernel/unaligned.c unsigned long *kbs = (void *) current + IA64_RBS_OFFSET; current 420 arch/ia64/kernel/unaligned.c if (!user_stack(current, regs)) { current 432 arch/ia64/kernel/unaligned.c ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val); current 440 arch/ia64/kernel/unaligned.c ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats); current 537 arch/ia64/kernel/unaligned.c ia64_sync_fph(current); current 538 arch/ia64/kernel/unaligned.c current->thread.fph[fph_index(regs, regnum)] = *fpval; current 596 arch/ia64/kernel/unaligned.c ia64_flush_fph(current); current 597 arch/ia64/kernel/unaligned.c *fpval = current->thread.fph[fph_index(regs, regnum)]; current 1323 arch/ia64/kernel/unaligned.c if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0) current 1327 arch/ia64/kernel/unaligned.c !(current->thread.flags & IA64_THREAD_UAC_NOPRINT) && current 1334 arch/ia64/kernel/unaligned.c "ip=0x%016lx\n\r", current->comm, current 1335 arch/ia64/kernel/unaligned.c task_pid_nr(current), current 1360 arch/ia64/kernel/unaligned.c current->comm, task_pid_nr(current)); current 65 arch/ia64/mm/fault.c struct mm_struct *mm = current->mm; current 144 arch/ia64/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 166 arch/ia64/mm/fault.c current->maj_flt++; current 168 arch/ia64/mm/fault.c current->min_flt++; current 93 arch/ia64/mm/init.c current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size); current 114 arch/ia64/mm/init.c vma = vm_area_alloc(current->mm); current 117 arch/ia64/mm/init.c vma->vm_start = current->thread.rbs_bot & PAGE_MASK; current 121 arch/ia64/mm/init.c down_write(¤t->mm->mmap_sem); current 122 arch/ia64/mm/init.c if (insert_vm_struct(current->mm, vma)) { current 123 arch/ia64/mm/init.c up_write(¤t->mm->mmap_sem); current 127 arch/ia64/mm/init.c up_write(¤t->mm->mmap_sem); current 131 arch/ia64/mm/init.c if (!(current->personality & MMAP_PAGE_ZERO)) { current 132 arch/ia64/mm/init.c vma = vm_area_alloc(current->mm); current 139 arch/ia64/mm/init.c down_write(¤t->mm->mmap_sem); current 140 arch/ia64/mm/init.c if (insert_vm_struct(current->mm, vma)) { current 141 arch/ia64/mm/init.c up_write(¤t->mm->mmap_sem); current 145 arch/ia64/mm/init.c up_write(¤t->mm->mmap_sem); current 253 arch/ia64/mm/tlb.c struct mm_struct *active_mm = current->active_mm; current 320 arch/ia64/mm/tlb.c if (mm != current->active_mm) { current 336 arch/ia64/mm/tlb.c if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) { current 81 arch/ia64/oprofile/backtrace.c unw_init_frame_info(&bt->frame, current, sw); current 48 arch/m68k/coldfire/sltimers.c if (current->pid) current 166 arch/m68k/coldfire/timers.c if (current->pid) current 198 arch/m68k/include/asm/cacheflush_mm.h if (mm == current->mm) current 210 arch/m68k/include/asm/cacheflush_mm.h if (vma->vm_mm == current->mm) current 216 arch/m68k/include/asm/cacheflush_mm.h if (vma->vm_mm == current->mm) current 7 arch/m68k/include/asm/current.h register struct task_struct *current __asm__("%a2"); current 13 arch/m68k/include/asm/flat.h if (current->mm) \ current 14 arch/m68k/include/asm/flat.h (regs)->d5 = current->mm->start_data; \ current 128 arch/m68k/include/asm/math-emu.h #define FPDATA ((struct fp_data *)current->thread.fp) current 137 arch/m68k/include/asm/processor.h #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) current 81 arch/m68k/include/asm/tlbflush.h if (mm == current->active_mm) current 87 arch/m68k/include/asm/tlbflush.h if (vma->vm_mm == current->active_mm) { current 98 arch/m68k/include/asm/tlbflush.h if (vma->vm_mm == current->active_mm) current 95 arch/m68k/kernel/process.c current->thread.fs = __USER_DS; current 201 arch/m68k/kernel/process.c memcpy(fpu->fpcntl, current->thread.fpcntl, 12); current 202 arch/m68k/kernel/process.c memcpy(fpu->fpregs, current->thread.fp, 96); current 252 arch/m68k/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 277 arch/m68k/kernel/ptrace.c ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) current 284 arch/m68k/kernel/ptrace.c if (current->exit_code) { current 285 arch/m68k/kernel/ptrace.c send_sig(current->exit_code, current, 1); current 286 arch/m68k/kernel/ptrace.c current->exit_code = 0; current 296 arch/m68k/kernel/ptrace.c ret = tracehook_report_syscall_entry(task_pt_regs(current)); current 303 arch/m68k/kernel/ptrace.c tracehook_report_syscall_exit(task_pt_regs(current), 0); current 262 arch/m68k/kernel/signal.c memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12); current 263 arch/m68k/kernel/signal.c memcpy(current->thread.fp, sc->sc_fpregs, 24); current 341 arch/m68k/kernel/signal.c if (__copy_from_user(current->thread.fpcntl, current 345 arch/m68k/kernel/signal.c if (__copy_from_user(current->thread.fp, current 434 arch/m68k/kernel/signal.c memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12); current 435 arch/m68k/kernel/signal.c memcpy(sc->sc_fpregs, current->thread.fp, 24); current 492 arch/m68k/kernel/signal.c current->thread.fpcntl, 12); current 495 arch/m68k/kernel/signal.c current->thread.fp, 96); current 703 arch/m68k/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 741 arch/m68k/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 1103 arch/m68k/kernel/signal.c send_sig(SIGTRAP, current, 1); current 1116 arch/m68k/kernel/signal.c current->thread.esp0 = (unsigned long) regs; current 402 arch/m68k/kernel/sys_m68k.c down_read(¤t->mm->mmap_sem); current 403 arch/m68k/kernel/sys_m68k.c vma = find_vma(current->mm, addr); current 453 arch/m68k/kernel/sys_m68k.c up_read(¤t->mm->mmap_sem); current 466 arch/m68k/kernel/sys_m68k.c struct mm_struct *mm = current->mm; current 537 arch/m68k/kernel/sys_m68k.c struct mm_struct *mm = current->mm; current 239 arch/m68k/kernel/traps.c if (wba != current->thread.faddr) current 357 arch/m68k/kernel/traps.c current->thread.signo = SIGBUS; current 358 arch/m68k/kernel/traps.c current->thread.faddr = fp->un.fmt7.faddr; current 511 arch/m68k/kernel/traps.c pr_debug("pid = %x ", current->pid); current 539 arch/m68k/kernel/traps.c temp, addr, current); current 647 arch/m68k/kernel/traps.c temp, addr, current); current 761 arch/m68k/kernel/traps.c current->thread.esp0 = (unsigned long) fp; current 863 arch/m68k/kernel/traps.c current->comm, task_pid_nr(current), current); current 1004 arch/m68k/kernel/traps.c pr_err("Current process id is %d\n", task_pid_nr(current)); current 1147 arch/m68k/kernel/traps.c current->thread.esp0 = ssp; current 27 arch/m68k/mm/fault.c signo = current->thread.signo; current 28 arch/m68k/mm/fault.c si_code = current->thread.code; current 29 arch/m68k/mm/fault.c addr = (void __user *)current->thread.faddr; current 71 arch/m68k/mm/fault.c struct mm_struct *mm = current->mm; current 141 arch/m68k/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 161 arch/m68k/mm/fault.c current->maj_flt++; current 163 arch/m68k/mm/fault.c current->min_flt++; current 195 arch/m68k/mm/fault.c current->thread.signo = SIGBUS; current 196 arch/m68k/mm/fault.c current->thread.faddr = address; current 200 arch/m68k/mm/fault.c current->thread.signo = SIGBUS; current 201 arch/m68k/mm/fault.c current->thread.code = BUS_ADRERR; current 202 arch/m68k/mm/fault.c current->thread.faddr = address; current 206 arch/m68k/mm/fault.c current->thread.signo = SIGSEGV; current 207 arch/m68k/mm/fault.c current->thread.code = SEGV_MAPERR; current 208 arch/m68k/mm/fault.c current->thread.faddr = address; current 212 arch/m68k/mm/fault.c current->thread.signo = SIGSEGV; current 213 arch/m68k/mm/fault.c current->thread.code = SEGV_ACCERR; current 214 arch/m68k/mm/fault.c current->thread.faddr = address; current 82 arch/m68k/mm/mcfmmu.c current->mm = NULL; current 104 arch/m68k/mm/mcfmmu.c mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm; current 89 arch/m68k/mm/sun3mmu.c current->mm = NULL; current 363 arch/m68k/sun3/mmu_emu.c if(current->mm == NULL) { current 367 arch/m68k/sun3/mmu_emu.c context = current->mm->context; current 371 arch/m68k/sun3/mmu_emu.c crp = current->mm->pgd; current 24 arch/microblaze/include/asm/current.h register struct task_struct *current asm("r31"); current 26 arch/microblaze/include/asm/current.h # define get_current() current current 28 arch/microblaze/include/asm/elf.h set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK))) current 135 arch/microblaze/include/asm/mmu_context_mm.h current->thread.pgdir = mm->pgd; current 32 arch/microblaze/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 179 arch/microblaze/kernel/setup.c per_cpu(CURRENT_SAVE, 0) = (unsigned long)current; current 92 arch/microblaze/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 199 arch/microblaze/kernel/signal.c pgd_offset(current->mm, address), current 234 arch/microblaze/kernel/signal.c current->comm, current->pid, frame, regs->pc); current 75 arch/microblaze/kernel/traps.c task = current; current 289 arch/microblaze/kernel/unwind.c if (task == current) { current 317 arch/microblaze/kernel/unwind.c microblaze_unwind_inner(current, pc, fp, 0, trace); current 90 arch/microblaze/mm/fault.c struct mm_struct *mm = current->mm; current 173 arch/microblaze/mm/fault.c struct pt_regs *uregs = current->thread.regs; current 220 arch/microblaze/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 235 arch/microblaze/mm/fault.c current->maj_flt++; current 237 arch/microblaze/mm/fault.c current->min_flt++; current 223 arch/microblaze/mm/pgtable.c mm = current->mm; current 330 arch/microblaze/pci/pci-common.c current->comm, current->pid); current 31 arch/mips/cavium-octeon/cpu.c prefetch(¤t->thread.cp2); current 33 arch/mips/cavium-octeon/cpu.c KSTK_STATUS(current) |= ST0_CU2; current 36 arch/mips/cavium-octeon/cpu.c octeon_cop2_restore(&(current->thread.cp2)); current 36 arch/mips/cavium-octeon/crypto/octeon-crypto.c if (KSTK_STATUS(current) & ST0_CU2) { current 37 arch/mips/cavium-octeon/crypto/octeon-crypto.c octeon_cop2_save(&(current->thread.cp2)); current 38 arch/mips/cavium-octeon/crypto/octeon-crypto.c KSTK_STATUS(current) &= ~ST0_CU2; current 75 arch/mips/include/asm/dsp.h if (tsk == current) \ current 76 arch/mips/include/asm/dsp.h __save_dsp(current); \ current 350 arch/mips/include/asm/elf.h current->thread.abi = &mips_abi; \ current 355 arch/mips/include/asm/elf.h if (personality(current->personality) != PER_LINUX) \ current 368 arch/mips/include/asm/elf.h current->thread.abi = &mips_abi_n32; \ current 383 arch/mips/include/asm/elf.h current->thread.abi = &mips_abi_32; \ current 417 arch/mips/include/asm/elf.h current->thread.abi = &mips_abi; \ current 421 arch/mips/include/asm/elf.h p = personality(current->personality); \ current 476 arch/mips/include/asm/elf.h (unsigned long)current->mm->context.vdso); \ current 132 arch/mips/include/asm/fpu.h KSTK_STATUS(current) |= ST0_CU1; current 134 arch/mips/include/asm/fpu.h KSTK_STATUS(current) |= ST0_FR; current 136 arch/mips/include/asm/fpu.h KSTK_STATUS(current) &= ~ST0_FR; current 149 arch/mips/include/asm/fpu.h _restore_fp(current); current 191 arch/mips/include/asm/fpu.h lose_fpu_inatomic(save, current); current 241 arch/mips/include/asm/fpu.h if (tsk == current) { current 244 arch/mips/include/asm/fpu.h _save_fp(current); current 188 arch/mips/include/asm/mmu_context.h #define activate_mm(prev, next) switch_mm(prev, next, current) current 258 arch/mips/include/asm/page.h ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ current 37 arch/mips/include/asm/stackprotector.h current->stack_canary = canary; current 38 arch/mips/include/asm/stackprotector.h __stack_chk_guard = current->stack_canary; current 154 arch/mips/kernel/branch.c fcr31 = current->thread.fpu.fcr31; current 691 arch/mips/kernel/branch.c if (!init_fp_ctx(current)) current 694 arch/mips/kernel/branch.c bit = get_fpr32(¤t->thread.fpu.fpr[reg], 0) & 0x1; current 712 arch/mips/kernel/branch.c fcr31 = current->thread.fpu.fcr31; current 831 arch/mips/kernel/branch.c current->comm); current 836 arch/mips/kernel/branch.c current->comm); current 841 arch/mips/kernel/branch.c current->comm); current 869 arch/mips/kernel/branch.c printk("%s: unaligned epc - sending SIGBUS.\n", current->comm); current 34 arch/mips/kernel/crash.c regs = task_pt_regs(current); current 310 arch/mips/kernel/elf.c struct task_struct *t = current; current 332 arch/mips/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 141 arch/mips/kernel/kgdb.c } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) { current 147 arch/mips/kernel/kgdb.c memcpy((void *)¤t->thread.fpu.fcr31, mem, current 155 arch/mips/kernel/kgdb.c memcpy((void *)¤t->thread.fpu.fpr[fp_reg], mem, current 158 arch/mips/kernel/kgdb.c restore_fp(current); current 175 arch/mips/kernel/kgdb.c } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) { current 179 arch/mips/kernel/kgdb.c save_fp(current); current 182 arch/mips/kernel/kgdb.c memcpy(mem, (void *)¤t->thread.fpu.fcr31, current 191 arch/mips/kernel/kgdb.c memcpy(mem, (void *)¤t->thread.fpu.fpr[fp_reg], current 222 arch/mips/kernel/kprobes.c pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm); current 498 arch/mips/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags); current 514 arch/mips/kernel/kprobes.c if (ri->task != current) current 536 arch/mips/kernel/kprobes.c kretprobe_hash_unlock(current, &flags); current 93 arch/mips/kernel/linux32.c if (personality(current->personality) == PER_LINUX32 && current 43 arch/mips/kernel/mips-mt-fpaff.c return pid ? find_task_by_vpid(pid) : current; current 204 arch/mips/kernel/mips-r2-to-r6-emul.c csr = current->thread.fpu.fcr31; current 227 arch/mips/kernel/mips-r2-to-r6-emul.c csr = current->thread.fpu.fcr31; current 1178 arch/mips/kernel/mips-r2-to-r6-emul.c err = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, current 1185 arch/mips/kernel/mips-r2-to-r6-emul.c *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31); current 1186 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.fpu.fcr31 &= ~res; current 1199 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = (unsigned long)fault_addr; current 1209 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 1282 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 1356 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 1426 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 1501 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 1620 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 1739 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 1857 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 1969 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 1974 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 2025 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 2030 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 2088 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 2093 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 2149 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 2154 arch/mips/kernel/mips-r2-to-r6-emul.c current->thread.cp0_baduaddr = vaddr; current 52 arch/mips/kernel/perf_event.c (unsigned long)task_stack_page(current); current 62 arch/mips/kernel/perf_event.c pc = unwind_stack(current, &sp, pc, &ra); current 30 arch/mips/kernel/pm.c save_dsp(current); current 44 arch/mips/kernel/pm.c if (current->mm) current 45 arch/mips/kernel/pm.c write_c0_entryhi(cpu_asid(cpu, current->mm)); current 48 arch/mips/kernel/pm.c restore_dsp(current); current 55 arch/mips/kernel/pm.c __restore_watch(current); current 78 arch/mips/kernel/process.c atomic_set(¤t->thread.bd_emu_frame, BD_EMUFRAME_NONE); current 90 arch/mips/kernel/process.c if (!(current->flags & PF_KTHREAD)) current 106 arch/mips/kernel/process.c save_msa(current); current 108 arch/mips/kernel/process.c _save_fp(current); current 110 arch/mips/kernel/process.c save_dsp(current); current 631 arch/mips/kernel/process.c if (!task || task == current || task->state == TASK_RUNNING) current 657 arch/mips/kernel/process.c top -= PAGE_ALIGN(current->thread.abi->vdso->size); current 666 arch/mips/kernel/process.c if (current->flags & PF_RANDOMIZE) current 678 arch/mips/kernel/process.c if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current 1421 arch/mips/kernel/ptrace.c sd.arch = syscall_get_arch(current); current 1422 arch/mips/kernel/ptrace.c syscall_get_arguments(current, regs, args); current 1425 arch/mips/kernel/ptrace.c sd.instruction_pointer = KSTK_EIP(current); current 1445 arch/mips/kernel/ptrace.c syscall_set_return_value(current, regs, -ENOSYS, 0); current 137 arch/mips/kernel/rtlx.c if (!signal_pending(current)) { current 73 arch/mips/kernel/signal.c struct mips_abi *abi = current->thread.abi; current 82 arch/mips/kernel/signal.c __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0), current 85 arch/mips/kernel/signal.c err |= __put_user(current->thread.fpu.fcr31, csr); current 92 arch/mips/kernel/signal.c struct mips_abi *abi = current->thread.abi; current 102 arch/mips/kernel/signal.c set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val); current 104 arch/mips/kernel/signal.c err |= __get_user(current->thread.fpu.fcr31, csr); current 128 arch/mips/kernel/signal.c struct mips_abi *abi = current->thread.abi; current 137 arch/mips/kernel/signal.c struct mips_abi *abi = current->thread.abi; current 195 arch/mips/kernel/signal.c err = __put_user(current->thread.fpu.msacsr, &msa->csr); current 198 arch/mips/kernel/signal.c val = get_fpr64(¤t->thread.fpu.fpr[i], 1); current 241 arch/mips/kernel/signal.c current->thread.fpu.msacsr = csr; current 245 arch/mips/kernel/signal.c set_fpr64(¤t->thread.fpu.fpr[i], 1, val); current 327 arch/mips/kernel/signal.c struct mips_abi *abi = current->thread.abi; current 380 arch/mips/kernel/signal.c struct mips_abi *abi = current->thread.abi; current 523 arch/mips/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 734 arch/mips/kernel/signal.c current->comm, current->pid, current 781 arch/mips/kernel/signal.c current->comm, current->pid, current 805 arch/mips/kernel/signal.c struct mips_abi *abi = current->thread.abi; current 806 arch/mips/kernel/signal.c void *vdso = current->mm->context.vdso; current 870 arch/mips/kernel/signal.c regs->regs[2] = current->thread.abi->restart; current 134 arch/mips/kernel/signal_n32.c current->comm, current->pid, current 93 arch/mips/kernel/signal_o32.c current->restart_block.fn = do_no_restart_syscall; current 148 arch/mips/kernel/signal_o32.c current->comm, current->pid, current 234 arch/mips/kernel/signal_o32.c current->comm, current->pid, current 420 arch/mips/kernel/smp.c init_new_context(current, &init_mm); current 538 arch/mips/kernel/smp.c } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { current 588 arch/mips/kernel/smp.c } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { current 656 arch/mips/kernel/smp.c (current->mm != vma->vm_mm)) { current 74 arch/mips/kernel/stacktrace.c save_stack_trace_tsk(current, trace); current 85 arch/mips/kernel/stacktrace.c if (tsk != current) { current 91 arch/mips/kernel/stacktrace.c save_context_stack(trace, tsk, regs, tsk == current); current 87 arch/mips/kernel/syscall.c struct thread_info *ti = task_thread_info(current); current 169 arch/mips/kernel/syscall.c ll_task = current; current 148 arch/mips/kernel/traps.c task = current; current 209 arch/mips/kernel/traps.c if (task && task != current) { current 366 arch/mips/kernel/traps.c current->comm, current->pid, current_thread_info(), current, current 379 arch/mips/kernel/traps.c show_stacktrace(current, regs); current 394 arch/mips/kernel/traps.c if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr, current 415 arch/mips/kernel/traps.c if (regs && kexec_should_crash(current)) current 480 arch/mips/kernel/traps.c if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr, current 546 arch/mips/kernel/traps.c if (ll_task == NULL || ll_task == current) { current 551 arch/mips/kernel/traps.c ll_task = current; current 585 arch/mips/kernel/traps.c if (ll_bit == 0 || ll_task != current) { current 630 arch/mips/kernel/traps.c struct thread_info *ti = task_thread_info(current); current 749 arch/mips/kernel/traps.c force_fcr31_sig(fcr31, fault_addr, current); current 757 arch/mips/kernel/traps.c down_read(¤t->mm->mmap_sem); current 758 arch/mips/kernel/traps.c vma = find_vma(current->mm, (unsigned long)fault_addr); current 763 arch/mips/kernel/traps.c up_read(¤t->mm->mmap_sem); current 803 arch/mips/kernel/traps.c sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, current 810 arch/mips/kernel/traps.c fcr31 = mask_fcr31_x(current->thread.fpu.fcr31); current 811 arch/mips/kernel/traps.c current->thread.fpu.fcr31 &= ~fcr31; current 832 arch/mips/kernel/traps.c if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr, current 855 arch/mips/kernel/traps.c sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, current 862 arch/mips/kernel/traps.c fcr31 = mask_fcr31_x(current->thread.fpu.fcr31); current 863 arch/mips/kernel/traps.c current->thread.fpu.fcr31 &= ~fcr31; current 888 arch/mips/kernel/traps.c ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { current 894 arch/mips/kernel/traps.c if (cpumask_intersects(¤t->cpus_mask, &mt_fpu_cpumask)) { current 897 arch/mips/kernel/traps.c current->thread.user_cpus_allowed current 898 arch/mips/kernel/traps.c = current->cpus_mask; current 899 arch/mips/kernel/traps.c cpumask_and(&tmask, ¤t->cpus_mask, current 901 arch/mips/kernel/traps.c set_cpus_allowed_ptr(current, &tmask); current 924 arch/mips/kernel/traps.c if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr, current 929 arch/mips/kernel/traps.c if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr, current 990 arch/mips/kernel/traps.c current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; current 1032 arch/mips/kernel/traps.c current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) current 1038 arch/mips/kernel/traps.c current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) current 1044 arch/mips/kernel/traps.c current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) current 1050 arch/mips/kernel/traps.c current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) current 1083 arch/mips/kernel/traps.c current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; current 1139 arch/mips/kernel/traps.c ¤t->thread.cp0_baduaddr, current 1148 arch/mips/kernel/traps.c current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; current 1150 arch/mips/kernel/traps.c if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr, current 1236 arch/mips/kernel/traps.c first_fp = init_fp_ctx(current); current 1291 arch/mips/kernel/traps.c write_msa_csr(current->thread.fpu.msacsr); current 1313 arch/mips/kernel/traps.c _restore_fp(current); current 1323 arch/mips/kernel/traps.c restore_msa(current); current 1328 arch/mips/kernel/traps.c current->thread.fpu.fcr31); current 1421 arch/mips/kernel/traps.c sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, current 1428 arch/mips/kernel/traps.c fcr31 = mask_fcr31_x(current->thread.fpu.fcr31); current 1429 arch/mips/kernel/traps.c current->thread.fpu.fcr31 &= ~fcr31; current 1457 arch/mips/kernel/traps.c current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; current 1459 arch/mips/kernel/traps.c current->thread.trap_nr, SIGFPE) == NOTIFY_STOP) current 1521 arch/mips/kernel/traps.c if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { current 2218 arch/mips/kernel/traps.c current->active_mm = &init_mm; current 2219 arch/mips/kernel/traps.c BUG_ON(current->mm); current 2220 arch/mips/kernel/traps.c enter_lazy_tlb(&init_mm, current); current 1222 arch/mips/kernel/unaligned.c res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, current 1254 arch/mips/kernel/unaligned.c fpr = ¤t->thread.fpu.fpr[wd]; current 1742 arch/mips/kernel/unaligned.c res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, current 107 arch/mips/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 119 arch/mips/kernel/uprobes.c utask->autask.saved_trap_nr = current->thread.trap_nr; current 120 arch/mips/kernel/uprobes.c current->thread.trap_nr = UPROBE_TRAP_NR; current 121 arch/mips/kernel/uprobes.c regs->cp0_epc = current->utask->xol_vaddr; current 128 arch/mips/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 130 arch/mips/kernel/uprobes.c current->thread.trap_nr = utask->autask.saved_trap_nr; current 191 arch/mips/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 79 arch/mips/kernel/vdso.c if (current->flags & PF_RANDOMIZE) { current 89 arch/mips/kernel/vdso.c struct mips_vdso_image *image = current->thread.abi->vdso; current 90 arch/mips/kernel/vdso.c struct mm_struct *mm = current->mm; current 53 arch/mips/kernel/watch.c ¤t->thread.watch.mips3264; current 1449 arch/mips/kvm/mips.c if (signal_pending(current)) { current 1683 arch/mips/kvm/mips.c if (!(current->flags & PF_VCPU)) current 643 arch/mips/kvm/tlb.c cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm)); current 644 arch/mips/kvm/tlb.c current->active_mm = &init_mm; current 657 arch/mips/kvm/tlb.c cpumask_set_cpu(cpu, mm_cpumask(current->mm)); current 658 arch/mips/kvm/tlb.c current->active_mm = current->mm; current 1057 arch/mips/kvm/trap_emul.c if (current->flags & PF_VCPU) { current 1071 arch/mips/kvm/trap_emul.c if (current->flags & PF_VCPU) { current 1073 arch/mips/kvm/trap_emul.c check_switch_mmu_context(current->mm); current 1259 arch/mips/kvm/trap_emul.c check_switch_mmu_context(current->mm); current 2487 arch/mips/kvm/vz.c if (current->flags & PF_VCPU) { current 2610 arch/mips/kvm/vz.c if (current->flags & PF_VCPU) current 38 arch/mips/loongson64/loongson-3/cop2-ex.c KSTK_STATUS(current) |= (ST0_CU1 | ST0_CU2); current 40 arch/mips/loongson64/loongson-3/cop2-ex.c KSTK_STATUS(current) |= ST0_FR; current 42 arch/mips/loongson64/loongson-3/cop2-ex.c KSTK_STATUS(current) &= ~ST0_FR; current 46 arch/mips/loongson64/loongson-3/cop2-ex.c init_fp_ctx(current); current 47 arch/mips/loongson64/loongson-3/cop2-ex.c _restore_fp(current); current 706 arch/mips/math-emu/cp1emu.c fpr = ¤t->thread.fpu.fpr[insn.i_format.rt]; current 736 arch/mips/math-emu/cp1emu.c fcr31 = current->thread.fpu.fcr31; current 1191 arch/mips/math-emu/cp1emu.c fpr = ¤t->thread.fpu.fpr[MIPSInst_RT(ir)]; current 2826 arch/mips/math-emu/cp1emu.c if (!init_fp_ctx(current)) current 77 arch/mips/math-emu/dsemul.c mm_context_t *mm_ctx = ¤t->mm->context; current 119 arch/mips/math-emu/dsemul.c pr_debug("allocate emuframe %d to %d\n", idx, current->pid); current 131 arch/mips/math-emu/dsemul.c pr_debug("free emuframe %d from %d\n", idx, current->pid); current 183 arch/mips/math-emu/dsemul.c fr_idx = atomic_read(¤t->thread.bd_emu_frame); current 196 arch/mips/math-emu/dsemul.c regs->cp0_epc = current->thread.bd_emu_branch_pc; current 198 arch/mips/math-emu/dsemul.c regs->cp0_epc = current->thread.bd_emu_cont_pc; current 200 arch/mips/math-emu/dsemul.c atomic_set(¤t->thread.bd_emu_frame, BD_EMUFRAME_NONE); current 201 arch/mips/math-emu/dsemul.c free_emuframe(fr_idx, current->mm); current 249 arch/mips/math-emu/dsemul.c fr_idx = atomic_read(¤t->thread.bd_emu_frame); current 276 arch/mips/math-emu/dsemul.c ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr), current 280 arch/mips/math-emu/dsemul.c free_emuframe(fr_idx, current->mm); current 285 arch/mips/math-emu/dsemul.c current->thread.bd_emu_branch_pc = branch_pc; current 286 arch/mips/math-emu/dsemul.c current->thread.bd_emu_cont_pc = cont_pc; current 287 arch/mips/math-emu/dsemul.c atomic_set(¤t->thread.bd_emu_frame, fr_idx); current 298 arch/mips/math-emu/dsemul.c if (!dsemul_thread_cleanup(current)) { current 304 arch/mips/math-emu/dsemul.c xcp->cp0_epc = current->thread.bd_emu_cont_pc; current 154 arch/mips/math-emu/ieee754.h #define ieee754_csr (*(struct _ieee754_csr *)(¤t->thread.fpu.fcr31)) current 678 arch/mips/mm/c-r4k.c if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) current 702 arch/mips/mm/c-r4k.c if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) { current 203 arch/mips/mm/c-tx39.c if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) { current 42 arch/mips/mm/fault.c struct task_struct *tsk = current; current 53 arch/mips/mm/fault.c current->comm, current->pid, field, address, write, current 62 arch/mips/mm/fault.c current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) current 127 arch/mips/mm/fault.c current->comm, current->pid, current 138 arch/mips/mm/fault.c current->comm, current->pid, current 157 arch/mips/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 225 arch/mips/mm/fault.c current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; current 233 arch/mips/mm/fault.c current->thread.cp0_baduaddr = address; current 280 arch/mips/mm/fault.c current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; current 33 arch/mips/mm/mmap.c struct mm_struct *mm = current->mm; current 191 arch/mips/mm/tlb-r3k.c if (current->active_mm != vma->vm_mm) current 306 arch/mips/mm/tlb-r4k.c if (current->active_mm != vma->vm_mm) current 103 arch/mips/netlogic/xlp/cop2-ex.c KSTK_STATUS(current) |= ST0_CU2; current 106 arch/mips/netlogic/xlp/cop2-ex.c nlm_cop2_restore(&(current->thread.cp2)); current 110 arch/mips/netlogic/xlp/cop2-ex.c current->pid, current->comm); current 21 arch/mips/power/cpu.c save_fp(current); current 23 arch/mips/power/cpu.c save_dsp(current); current 31 arch/mips/power/cpu.c restore_fp(current); current 33 arch/mips/power/cpu.c restore_dsp(current); current 8 arch/nds32/include/asm/current.h register struct task_struct *current asm("$r25"); current 175 arch/nds32/include/asm/elf.h (elf_addr_t)current->mm->context.vdso); \ current 78 arch/nds32/include/asm/fpu.h if (last_task_used_math == current) { current 81 arch/nds32/include/asm/fpu.h if (test_tsk_fpu(task_pt_regs(current))) { current 83 arch/nds32/include/asm/fpu.h save_fpu(current); current 85 arch/nds32/include/asm/fpu.h disable_ptreg_fpu(task_pt_regs(current)); current 93 arch/nds32/include/asm/fpu.h if (last_task_used_math != current) { current 96 arch/nds32/include/asm/fpu.h load_fpu(¤t->thread.fpu); current 97 arch/nds32/include/asm/fpu.h last_task_used_math = current; current 100 arch/nds32/include/asm/fpu.h if (!test_tsk_fpu(task_pt_regs(current))) { current 101 arch/nds32/include/asm/fpu.h load_fpu(¤t->thread.fpu); current 104 arch/nds32/include/asm/fpu.h enable_ptreg_fpu(task_pt_regs(current)); current 53 arch/nds32/include/asm/sfp-machine.h #define __FPU_FPCSR (current->thread.fpu.fpcsr) current 152 arch/nds32/kernel/fpu.c unlazy_fpu(current); current 154 arch/nds32/kernel/fpu.c clear_fpu(task_pt_regs(current)); current 168 arch/nds32/kernel/fpu.c if (last_task_used_math == current) current 173 arch/nds32/kernel/fpu.c last_task_used_math = current; current 176 arch/nds32/kernel/fpu.c load_fpu(¤t->thread.fpu); current 181 arch/nds32/kernel/fpu.c current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap; current 215 arch/nds32/kernel/fpu.c fpcsr = current->thread.fpu.fpcsr; current 218 arch/nds32/kernel/fpu.c si_signo = do_fpuemu(regs, ¤t->thread.fpu); current 219 arch/nds32/kernel/fpu.c fpcsr = current->thread.fpu.fpcsr; current 221 arch/nds32/kernel/fpu.c current->thread.fpu.fpcsr &= ~(redo_except); current 215 arch/nds32/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 140 arch/nds32/kernel/process.c clear_fpu(task_pt_regs(current)); current 143 arch/nds32/kernel/process.c if (last_task_used_math == current) current 182 arch/nds32/kernel/process.c unlazy_fpu(current); current 185 arch/nds32/kernel/process.c if (last_task_used_math == current) current 186 arch/nds32/kernel/process.c save_fpu(current); current 189 arch/nds32/kernel/process.c p->thread.fpu = current->thread.fpu; current 223 arch/nds32/kernel/process.c struct task_struct *tsk = current; current 242 arch/nds32/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 28 arch/nds32/kernel/signal.c struct task_struct *tsk = current; current 41 arch/nds32/kernel/signal.c if (current == last_task_used_math) { current 57 arch/nds32/kernel/signal.c struct task_struct *tsk = current; current 142 arch/nds32/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 217 arch/nds32/kernel/signal.c __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, current 219 arch/nds32/kernel/signal.c __put_user_error(current->thread.error_code, current 221 arch/nds32/kernel/signal.c __put_user_error(current->thread.address, current 262 arch/nds32/kernel/signal.c retcode = VDSO_SYMBOL(current->mm->context.vdso, rt_sigtramp); current 11 arch/nds32/kernel/stacktrace.c save_stack_trace_tsk(current, trace); current 22 arch/nds32/kernel/stacktrace.c if (tsk == current) { current 34 arch/nds32/kernel/sys_nds32.c vma = find_vma(current->mm, start); current 61 arch/nds32/kernel/sys_nds32.c current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap; current 65 arch/nds32/kernel/sys_nds32.c old_udf_iex = current->thread.fpu.UDF_IEX_trap; current 70 arch/nds32/kernel/sys_nds32.c current->thread.fpu.UDF_IEX_trap &= ~act; current 73 arch/nds32/kernel/sys_nds32.c current->thread.fpu.UDF_IEX_trap |= act; current 142 arch/nds32/kernel/traps.c tsk = current; current 144 arch/nds32/kernel/traps.c if (tsk != current) current 149 arch/nds32/kernel/traps.c if (tsk != current) current 165 arch/nds32/kernel/traps.c struct task_struct *tsk = current; current 202 arch/nds32/kernel/traps.c if (current->personality != PER_LINUX) { current 203 arch/nds32/kernel/traps.c send_sig(SIGSEGV, current, 1); current 260 arch/nds32/kernel/traps.c struct task_struct *tsk = current; current 96 arch/nds32/kernel/vdso.c unsigned long start = current->mm->mmap_base, end, offset, addr; current 116 arch/nds32/kernel/vdso.c struct mm_struct *mm = current->mm; current 59 arch/nds32/mm/cacheflush.c if (vma->vm_mm == current->active_mm) { current 307 arch/nds32/mm/cacheflush.c if (vma->vm_mm != current->active_mm) current 82 arch/nds32/mm/fault.c tsk = current; current 213 arch/nds32/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { current 80 arch/nds32/mm/mm-nds32.c if (current->mm && current->mm->pgd) current 81 arch/nds32/mm/mm-nds32.c pgd = current->mm->pgd; current 26 arch/nds32/mm/mmap.c struct mm_struct *mm = current->mm; current 58 arch/nds32/mm/proc.c struct mm_struct *mm = current->mm; current 77 arch/nds32/mm/proc.c struct mm_struct *mm = current->mm; current 162 arch/nios2/kernel/misaligned.c current->pid); current 158 arch/nios2/kernel/process.c pr_emerg("COMM=%s PID=%d\n", current->comm, current->pid); current 160 arch/nios2/kernel/process.c if (current->mm) { current 162 arch/nios2/kernel/process.c (int) current->mm->start_code, current 163 arch/nios2/kernel/process.c (int) current->mm->end_code, current 164 arch/nios2/kernel/process.c (int) current->mm->start_data, current 165 arch/nios2/kernel/process.c (int) current->mm->end_data, current 166 arch/nios2/kernel/process.c (int) current->mm->end_data, current 167 arch/nios2/kernel/process.c (int) current->mm->brk); current 169 arch/nios2/kernel/process.c (int) current->mm->start_stack, current 170 arch/nios2/kernel/process.c (int)(((unsigned long) current) + THREAD_SIZE)); current 226 arch/nios2/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 158 arch/nios2/kernel/ptrace.c ret = tracehook_report_syscall_entry(task_pt_regs(current)); current 166 arch/nios2/kernel/ptrace.c tracehook_report_syscall_exit(task_pt_regs(current), 0); current 46 arch/nios2/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 238 arch/nios2/kernel/signal.c current->thread.kregs = regs; current 41 arch/nios2/kernel/sys_nios2.c vma = find_vma(current->mm, addr); current 75 arch/nios2/mm/cacheflush.c struct mm_struct *mm = current->active_mm; current 46 arch/nios2/mm/fault.c struct task_struct *tsk = current; current 136 arch/nios2/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 156 arch/nios2/mm/fault.c current->maj_flt++; current 158 arch/nios2/mm/fault.c current->min_flt++; current 188 arch/nios2/mm/fault.c if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) { current 190 arch/nios2/mm/fault.c "cause %ld\n", current->comm, SIGSEGV, address, cause); current 112 arch/nios2/mm/init.c struct mm_struct *mm = current->mm; current 258 arch/nios2/mm/tlb.c if (current->mm == mm) { current 55 arch/openrisc/include/asm/tlbflush.h flush_tlb_mm(current->mm); current 45 arch/openrisc/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 117 arch/openrisc/kernel/smp.c current->active_mm = mm; current 71 arch/openrisc/kernel/stacktrace.c if (tsk == current) current 98 arch/openrisc/kernel/traps.c current->comm, current->pid, (unsigned long)current); current 160 arch/openrisc/kernel/traps.c ((struct task_struct *)(__pa(current)))->comm, current 161 arch/openrisc/kernel/traps.c ((struct task_struct *)(__pa(current)))->pid, current 162 arch/openrisc/kernel/traps.c (unsigned long)current); current 55 arch/openrisc/mm/fault.c tsk = current; current 164 arch/openrisc/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 195 arch/parisc/include/asm/compat.h struct pt_regs *regs = ¤t->thread.regs; current 206 arch/parisc/include/asm/compat.h return __is_compat_task(current); current 237 arch/parisc/include/asm/elf.h set_personality((current->personality & ~PER_MASK) | PER_LINUX); \ current 239 arch/parisc/include/asm/elf.h current->thread.map_base = DEFAULT_MAP_BASE; \ current 240 arch/parisc/include/asm/elf.h current->thread.task_size = DEFAULT_TASK_SIZE; \ current 249 arch/parisc/include/asm/elf.h current->thread.map_base = DEFAULT_MAP_BASE32; \ current 250 arch/parisc/include/asm/elf.h current->thread.task_size = DEFAULT_TASK_SIZE32; \ current 93 arch/parisc/include/asm/mmu_context.h switch_mm(prev,next,current); current 26 arch/parisc/include/asm/processor.h #define TASK_SIZE TASK_SIZE_OF(current) current 27 arch/parisc/include/asm/processor.h #define TASK_UNMAPPED_BASE (current->thread.map_base) current 252 arch/parisc/include/asm/processor.h __u32 spaceid = (__u32)current->mm->context; \ current 58 arch/parisc/include/asm/tlbflush.h if (mm == current->active_mm) current 40 arch/parisc/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 435 arch/parisc/kernel/irq.c current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE); current 452 arch/parisc/kernel/irq.c current->comm, sp, stack_start, stack_start + THREAD_SIZE); current 202 arch/parisc/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags); current 218 arch/parisc/kernel/kprobes.c if (ri->task != current) current 237 arch/parisc/kernel/kprobes.c if (ri->task != current) current 261 arch/parisc/kernel/kprobes.c kretprobe_hash_unlock(current, &flags); current 272 arch/parisc/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 184 arch/parisc/kernel/ptrace.c task_user_regset_view(current), current 191 arch/parisc/kernel/ptrace.c task_user_regset_view(current), current 198 arch/parisc/kernel/ptrace.c task_user_regset_view(current), current 205 arch/parisc/kernel/ptrace.c task_user_regset_view(current), current 101 arch/parisc/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 187 arch/parisc/kernel/signal.c sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */ current 375 arch/parisc/kernel/signal.c if (pa_psw(current)->r) { current 376 arch/parisc/kernel/signal.c pa_psw(current)->r = 0; current 408 arch/parisc/kernel/signal.c current->comm, current->pid, frame, regs->gr[30], current 483 arch/parisc/kernel/signal.c current->comm, task_pid_nr(current), opcode); current 93 arch/parisc/kernel/smp.c switch_to_idle_task(current); current 281 arch/parisc/kernel/smp.c current->active_mm = &init_mm; current 282 arch/parisc/kernel/smp.c BUG_ON(current->mm); current 283 arch/parisc/kernel/smp.c enter_lazy_tlb(&init_mm, current); current 38 arch/parisc/kernel/stacktrace.c dump_trace(current, trace); current 75 arch/parisc/kernel/sys_parisc.c if (current->flags & PF_RANDOMIZE) current 85 arch/parisc/kernel/sys_parisc.c struct mm_struct *mm = current->mm; current 141 arch/parisc/kernel/sys_parisc.c struct mm_struct *mm = current->mm; current 205 arch/parisc/kernel/sys_parisc.c if (current->personality & ADDR_COMPAT_LAYOUT) current 221 arch/parisc/kernel/sys_parisc.c if (current->flags & PF_RANDOMIZE) current 366 arch/parisc/kernel/sys_parisc.c if (personality(current->personality) == PER_LINUX32 current 23 arch/parisc/kernel/sys_parisc32.c current->comm, current->pid, r20); current 158 arch/parisc/kernel/traps.c parisc_show_stack(current, regs); current 219 arch/parisc/kernel/traps.c current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); current 250 arch/parisc/kernel/traps.c current->comm, task_pid_nr(current), str, err); current 253 arch/parisc/kernel/traps.c if (current->thread.flags & PARISC_KERNEL_DEATH) { current 258 arch/parisc/kernel/traps.c current->thread.flags |= PARISC_KERNEL_DEATH; current 319 arch/parisc/kernel/traps.c task_pid_nr(current), current->comm); current 448 arch/parisc/kernel/traps.c unwind_frame_init(&info, current, regs); current 720 arch/parisc/kernel/traps.c down_read(¤t->mm->mmap_sem); current 721 arch/parisc/kernel/traps.c vma = find_vma(current->mm,regs->iaoq[0]); current 728 arch/parisc/kernel/traps.c up_read(¤t->mm->mmap_sem); current 731 arch/parisc/kernel/traps.c up_read(¤t->mm->mmap_sem); current 756 arch/parisc/kernel/traps.c task_pid_nr(current), current->comm); current 773 arch/parisc/kernel/traps.c task_pid_nr(current), current->comm); current 447 arch/parisc/kernel/unaligned.c if (current->thread.flags & PARISC_UAC_SIGBUS) { current 451 arch/parisc/kernel/unaligned.c if (!(current->thread.flags & PARISC_UAC_NOPRINT) && current 455 arch/parisc/kernel/unaligned.c current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]); current 417 arch/parisc/kernel/unwind.c task = task ? task : current; current 419 arch/parisc/kernel/unwind.c if (task == current) { current 471 arch/parisc/kernel/unwind.c unwind_frame_init_task(&info, current, NULL); current 272 arch/parisc/mm/fault.c tsk = current; current 307 arch/parisc/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 327 arch/parisc/mm/fault.c current->maj_flt++; current 329 arch/parisc/mm/fault.c current->min_flt++; current 65 arch/powerpc/include/asm/book3s/32/kup.h .macro kuap_restore sp, current, gpr1, gpr2, gpr3 current 76 arch/powerpc/include/asm/book3s/32/kup.h .macro kuap_check current, gpr current 120 arch/powerpc/include/asm/book3s/32/kup.h current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf); current 137 arch/powerpc/include/asm/book3s/32/kup.h current->thread.kuap = 0; current 118 arch/powerpc/include/asm/compat.h struct pt_regs *regs = current->thread.regs; current 56 arch/powerpc/include/asm/cputime.h struct cpu_accounting_data *acct = get_accounting(current); current 33 arch/powerpc/include/asm/current.h register struct task_struct *current asm ("r2"); current 95 arch/powerpc/include/asm/elf.h if (personality(current->personality) != PER_LINUX32) \ current 97 arch/powerpc/include/asm/elf.h (current->personality & (~PER_MASK))); \ current 174 arch/powerpc/include/asm/elf.h VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \ current 24 arch/powerpc/include/asm/kup.h .macro kuap_restore sp, current, gpr1, gpr2, gpr3 current 27 arch/powerpc/include/asm/kup.h .macro kuap_check current, gpr current 219 arch/powerpc/include/asm/mmu_context.h switch_mm(prev, next, current); current 18 arch/powerpc/include/asm/nohash/32/kup-8xx.h .macro kuap_restore sp, current, gpr1, gpr2, gpr3 current 23 arch/powerpc/include/asm/nohash/32/kup-8xx.h .macro kuap_check current, gpr current 244 arch/powerpc/include/asm/page.h (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ current 44 arch/powerpc/include/asm/pte-walk.h VM_WARN(pgdir != current->mm->pgd, current 178 arch/powerpc/include/asm/ptrace.h ((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1) current 108 arch/powerpc/include/asm/sfp-machine.h #define __FPU_FPSCR (current->thread.spefscr) current 128 arch/powerpc/include/asm/sfp-machine.h #define __FPU_FPSCR (current->thread.fp_state.fpscr) current 96 arch/powerpc/include/asm/smp.h #define raw_smp_processor_id() (*(unsigned int *)((void *)current + _TASK_CPU)) current 32 arch/powerpc/include/asm/stackprotector.h current->stack_canary = canary; current 51 arch/powerpc/include/asm/task_size_64.h #define TASK_SIZE TASK_SIZE_OF(current) current 78 arch/powerpc/include/asm/tlb.h WARN_ON(current->mm != mm); current 32 arch/powerpc/include/asm/uaccess.h #define get_fs() (current->thread.addr_limit) current 36 arch/powerpc/include/asm/uaccess.h current->thread.addr_limit = fs; current 117 arch/powerpc/kernel/align.c unsigned long *evr = ¤t->thread.evr[reg]; current 137 arch/powerpc/kernel/align.c flush_spe_to_thread(current); current 65 arch/powerpc/kernel/hw_breakpoint.c if (current->thread.last_hit_ubp != bp) current 227 arch/powerpc/kernel/hw_breakpoint.c current->thread.last_hit_ubp = bp; current 325 arch/powerpc/kernel/hw_breakpoint.c bp = current->thread.last_hit_ubp; current 343 arch/powerpc/kernel/hw_breakpoint.c current->thread.last_hit_ubp = NULL; current 335 arch/powerpc/kernel/kgdb.c if (current) current 336 arch/powerpc/kernel/kgdb.c memcpy(mem, ¤t->thread.evr[regno-32], current 361 arch/powerpc/kernel/kgdb.c memcpy(¤t->thread.evr[regno-32], mem, current 406 arch/powerpc/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags); current 422 arch/powerpc/kernel/kprobes.c if (ri->task != current) current 461 arch/powerpc/kernel/kprobes.c kretprobe_hash_unlock(current, &flags); current 534 arch/powerpc/kernel/mce.c level, evt->cpu, current->pid, current->comm, current 36 arch/powerpc/kernel/mce_power.c mm = current->mm; current 594 arch/powerpc/kernel/pci-common.c current->comm, current->pid, pci_domain_nr(bus), bus->number); current 96 arch/powerpc/kernel/process.c if (tsk == current && tsk->thread.regs && current 207 arch/powerpc/kernel/process.c BUG_ON(tsk != current); current 223 arch/powerpc/kernel/process.c if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { current 224 arch/powerpc/kernel/process.c check_if_tm_restore_required(current); current 233 arch/powerpc/kernel/process.c MSR_TM_ACTIVE(current->thread.regs->msr)) current 235 arch/powerpc/kernel/process.c __giveup_fpu(current); current 243 arch/powerpc/kernel/process.c load_fp_state(¤t->thread.fp_state); current 244 arch/powerpc/kernel/process.c current->thread.load_fp++; current 288 arch/powerpc/kernel/process.c if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) { current 289 arch/powerpc/kernel/process.c check_if_tm_restore_required(current); current 298 arch/powerpc/kernel/process.c MSR_TM_ACTIVE(current->thread.regs->msr)) current 300 arch/powerpc/kernel/process.c __giveup_altivec(current); current 314 arch/powerpc/kernel/process.c BUG_ON(tsk != current); current 373 arch/powerpc/kernel/process.c if (current->thread.regs && current 374 arch/powerpc/kernel/process.c (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) { current 375 arch/powerpc/kernel/process.c check_if_tm_restore_required(current); current 384 arch/powerpc/kernel/process.c MSR_TM_ACTIVE(current->thread.regs->msr)) current 386 arch/powerpc/kernel/process.c __giveup_vsx(current); current 396 arch/powerpc/kernel/process.c BUG_ON(tsk != current); current 434 arch/powerpc/kernel/process.c if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) { current 435 arch/powerpc/kernel/process.c check_if_tm_restore_required(current); current 436 arch/powerpc/kernel/process.c __giveup_spe(current); current 446 arch/powerpc/kernel/process.c BUG_ON(tsk != current); current 529 arch/powerpc/kernel/process.c !current->thread.load_fp && !loadvec(current->thread)) current 539 arch/powerpc/kernel/process.c if ((!(msr & MSR_FP)) && restore_fp(current)) current 540 arch/powerpc/kernel/process.c msr |= MSR_FP | current->thread.fpexc_mode; current 542 arch/powerpc/kernel/process.c if ((!(msr & MSR_VEC)) && restore_altivec(current)) current 546 arch/powerpc/kernel/process.c restore_vsx(current)) { current 588 arch/powerpc/kernel/process.c BUG_ON(tsk != current); current 604 arch/powerpc/kernel/process.c current->thread.trap_nr = TRAP_HWBKPT; current 617 arch/powerpc/kernel/process.c current->thread.trap_nr = TRAP_HWBKPT; current 700 arch/powerpc/kernel/process.c if ((current->thread.debug.dbcr0 & DBCR0_IDM) current 876 arch/powerpc/kernel/process.c tm_reclaim_thread(¤t->thread, cause); current 1031 arch/powerpc/kernel/process.c msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; current 1036 arch/powerpc/kernel/process.c current->thread.load_fp = 1; current 1039 arch/powerpc/kernel/process.c current->thread.load_vec = 1; current 1132 arch/powerpc/kernel/process.c old_thread = ¤t->thread; current 1197 arch/powerpc/kernel/process.c if (current->thread.regs) { current 1198 arch/powerpc/kernel/process.c restore_math(current->thread.regs); current 1208 arch/powerpc/kernel/process.c if (current->thread.used_vas) current 1270 arch/powerpc/kernel/process.c current->comm, current->pid); current 1292 arch/powerpc/kernel/process.c pr_info("%s[%d]: code: %s\n", current->comm, current 1293 arch/powerpc/kernel/process.c current->pid, s.buffer); current 1434 arch/powerpc/kernel/process.c show_stack(current, (unsigned long *) regs->gpr[1]); current 1442 arch/powerpc/kernel/process.c flush_ptrace_hw_breakpoint(current); current 1444 arch/powerpc/kernel/process.c set_debug_reg_defaults(¤t->thread); current 1463 arch/powerpc/kernel/process.c current->thread.used_vas = 1; current 1517 arch/powerpc/kernel/process.c if (t != current) current 1673 arch/powerpc/kernel/process.c p->thread.dscr_inherit = current->thread.dscr_inherit; current 1705 arch/powerpc/kernel/process.c if (!current->thread.regs) { current 1706 arch/powerpc/kernel/process.c struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; current 1707 arch/powerpc/kernel/process.c current->thread.regs = regs - 1; current 1787 arch/powerpc/kernel/process.c current->thread.used_vsr = 0; current 1789 arch/powerpc/kernel/process.c current->thread.load_slb = 0; current 1790 arch/powerpc/kernel/process.c current->thread.load_fp = 0; current 1791 arch/powerpc/kernel/process.c memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state)); current 1792 arch/powerpc/kernel/process.c current->thread.fp_save_area = NULL; current 1794 arch/powerpc/kernel/process.c memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state)); current 1795 arch/powerpc/kernel/process.c current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ current 1796 arch/powerpc/kernel/process.c current->thread.vr_save_area = NULL; current 1797 arch/powerpc/kernel/process.c current->thread.vrsave = 0; current 1798 arch/powerpc/kernel/process.c current->thread.used_vr = 0; current 1799 arch/powerpc/kernel/process.c current->thread.load_vec = 0; current 1802 arch/powerpc/kernel/process.c memset(current->thread.evr, 0, sizeof(current->thread.evr)); current 1803 arch/powerpc/kernel/process.c current->thread.acc = 0; current 1804 arch/powerpc/kernel/process.c current->thread.spefscr = 0; current 1805 arch/powerpc/kernel/process.c current->thread.used_spe = 0; current 1808 arch/powerpc/kernel/process.c current->thread.tm_tfhar = 0; current 1809 arch/powerpc/kernel/process.c current->thread.tm_texasr = 0; current 1810 arch/powerpc/kernel/process.c current->thread.tm_tfiar = 0; current 1811 arch/powerpc/kernel/process.c current->thread.load_tm = 0; current 1814 arch/powerpc/kernel/process.c thread_pkey_regs_init(¤t->thread); current 1994 arch/powerpc/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 2042 arch/powerpc/kernel/process.c tsk = current; current 2049 arch/powerpc/kernel/process.c if (tsk == current) current 2067 arch/powerpc/kernel/process.c ret_addr = ftrace_graph_ret_addr(current, current 2147 arch/powerpc/kernel/process.c if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current 138 arch/powerpc/kernel/ptrace.c if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current)) current 282 arch/powerpc/kernel/ptrace32.c child, task_user_regset_view(current), 0, current 288 arch/powerpc/kernel/ptrace32.c child, task_user_regset_view(current), 0, current 111 arch/powerpc/kernel/signal.c BUG_ON(tsk != current); current 163 arch/powerpc/kernel/signal.c klp_update_patch_state(current); current 166 arch/powerpc/kernel/signal.c BUG_ON(regs != current->thread.regs); current 167 arch/powerpc/kernel/signal.c do_signal(current); current 206 arch/powerpc/kernel/signal.c BUG_ON(tsk != current); current 390 arch/powerpc/kernel/signal_32.c flush_fp_to_thread(current); current 398 arch/powerpc/kernel/signal_32.c if (current->thread.used_vr) { current 399 arch/powerpc/kernel/signal_32.c flush_altivec_to_thread(current); current 400 arch/powerpc/kernel/signal_32.c if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state, current 416 arch/powerpc/kernel/signal_32.c current->thread.vrsave = mfspr(SPRN_VRSAVE); current 417 arch/powerpc/kernel/signal_32.c if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32])) current 420 arch/powerpc/kernel/signal_32.c if (copy_fpr_to_user(&frame->mc_fregs, current)) current 435 arch/powerpc/kernel/signal_32.c if (current->thread.used_vsr && ctx_has_vsx_region) { current 436 arch/powerpc/kernel/signal_32.c flush_vsx_to_thread(current); current 437 arch/powerpc/kernel/signal_32.c if (copy_vsx_to_user(&frame->mc_vsregs, current)) current 444 arch/powerpc/kernel/signal_32.c if (current->thread.used_spe) { current 445 arch/powerpc/kernel/signal_32.c flush_spe_to_thread(current); current 446 arch/powerpc/kernel/signal_32.c if (__copy_to_user(&frame->mc_vregs, current->thread.evr, current 456 arch/powerpc/kernel/signal_32.c if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) current 498 arch/powerpc/kernel/signal_32.c if (save_general_regs(¤t->thread.ckpt_regs, frame) current 513 arch/powerpc/kernel/signal_32.c if (current->thread.used_vr) { current 514 arch/powerpc/kernel/signal_32.c if (__copy_to_user(&frame->mc_vregs, ¤t->thread.ckvr_state, current 519 arch/powerpc/kernel/signal_32.c ¤t->thread.vr_state, current 524 arch/powerpc/kernel/signal_32.c ¤t->thread.ckvr_state, current 541 arch/powerpc/kernel/signal_32.c current->thread.ckvrsave = mfspr(SPRN_VRSAVE); current 542 arch/powerpc/kernel/signal_32.c if (__put_user(current->thread.ckvrsave, current 546 arch/powerpc/kernel/signal_32.c if (__put_user(current->thread.vrsave, current 550 arch/powerpc/kernel/signal_32.c if (__put_user(current->thread.ckvrsave, current 556 arch/powerpc/kernel/signal_32.c if (copy_ckfpr_to_user(&frame->mc_fregs, current)) current 559 arch/powerpc/kernel/signal_32.c if (copy_fpr_to_user(&tm_frame->mc_fregs, current)) current 562 arch/powerpc/kernel/signal_32.c if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current)) current 573 arch/powerpc/kernel/signal_32.c if (current->thread.used_vsr) { current 574 arch/powerpc/kernel/signal_32.c if (copy_ckvsx_to_user(&frame->mc_vsregs, current)) current 578 arch/powerpc/kernel/signal_32.c current)) current 581 arch/powerpc/kernel/signal_32.c if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current)) current 592 arch/powerpc/kernel/signal_32.c if (current->thread.used_spe) { current 593 arch/powerpc/kernel/signal_32.c flush_spe_to_thread(current); current 594 arch/powerpc/kernel/signal_32.c if (__copy_to_user(&frame->mc_vregs, current->thread.evr, current 603 arch/powerpc/kernel/signal_32.c if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) current 662 arch/powerpc/kernel/signal_32.c if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs, current 665 arch/powerpc/kernel/signal_32.c current->thread.used_vr = true; current 666 arch/powerpc/kernel/signal_32.c } else if (current->thread.used_vr) current 667 arch/powerpc/kernel/signal_32.c memset(¤t->thread.vr_state, 0, current 671 arch/powerpc/kernel/signal_32.c if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) current 674 arch/powerpc/kernel/signal_32.c mtspr(SPRN_VRSAVE, current->thread.vrsave); current 676 arch/powerpc/kernel/signal_32.c if (copy_fpr_from_user(current, &sr->mc_fregs)) current 690 arch/powerpc/kernel/signal_32.c if (copy_vsx_from_user(current, &sr->mc_vsregs)) current 692 arch/powerpc/kernel/signal_32.c current->thread.used_vsr = true; current 693 arch/powerpc/kernel/signal_32.c } else if (current->thread.used_vsr) current 695 arch/powerpc/kernel/signal_32.c current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; current 709 arch/powerpc/kernel/signal_32.c if (__copy_from_user(current->thread.evr, &sr->mc_vregs, current 712 arch/powerpc/kernel/signal_32.c current->thread.used_spe = true; current 713 arch/powerpc/kernel/signal_32.c } else if (current->thread.used_spe) current 714 arch/powerpc/kernel/signal_32.c memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); current 717 arch/powerpc/kernel/signal_32.c if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG)) current 750 arch/powerpc/kernel/signal_32.c err |= restore_general_regs(¤t->thread.ckpt_regs, sr); current 752 arch/powerpc/kernel/signal_32.c err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]); current 765 arch/powerpc/kernel/signal_32.c if (__copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs, current 767 arch/powerpc/kernel/signal_32.c __copy_from_user(¤t->thread.vr_state, current 771 arch/powerpc/kernel/signal_32.c current->thread.used_vr = true; current 772 arch/powerpc/kernel/signal_32.c } else if (current->thread.used_vr) { current 773 arch/powerpc/kernel/signal_32.c memset(¤t->thread.vr_state, 0, current 775 arch/powerpc/kernel/signal_32.c memset(¤t->thread.ckvr_state, 0, current 780 arch/powerpc/kernel/signal_32.c if (__get_user(current->thread.ckvrsave, current 782 arch/powerpc/kernel/signal_32.c __get_user(current->thread.vrsave, current 786 arch/powerpc/kernel/signal_32.c mtspr(SPRN_VRSAVE, current->thread.ckvrsave); current 791 arch/powerpc/kernel/signal_32.c if (copy_fpr_from_user(current, &sr->mc_fregs) || current 792 arch/powerpc/kernel/signal_32.c copy_ckfpr_from_user(current, &tm_sr->mc_fregs)) current 802 arch/powerpc/kernel/signal_32.c if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) || current 803 arch/powerpc/kernel/signal_32.c copy_ckvsx_from_user(current, &sr->mc_vsregs)) current 805 arch/powerpc/kernel/signal_32.c current->thread.used_vsr = true; current 806 arch/powerpc/kernel/signal_32.c } else if (current->thread.used_vsr) current 808 arch/powerpc/kernel/signal_32.c current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; current 809 arch/powerpc/kernel/signal_32.c current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0; current 819 arch/powerpc/kernel/signal_32.c if (__copy_from_user(current->thread.evr, &sr->mc_vregs, current 822 arch/powerpc/kernel/signal_32.c current->thread.used_spe = true; current 823 arch/powerpc/kernel/signal_32.c } else if (current->thread.used_spe) current 824 arch/powerpc/kernel/signal_32.c memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); current 827 arch/powerpc/kernel/signal_32.c if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs current 863 arch/powerpc/kernel/signal_32.c current->thread.tm_texasr |= TEXASR_FS; current 865 arch/powerpc/kernel/signal_32.c tm_recheckpoint(¤t->thread); current 870 arch/powerpc/kernel/signal_32.c load_fp_state(¤t->thread.fp_state); current 871 arch/powerpc/kernel/signal_32.c regs->msr |= (MSR_FP | current->thread.fpexc_mode); current 875 arch/powerpc/kernel/signal_32.c load_vr_state(¤t->thread.vr_state); current 912 arch/powerpc/kernel/signal_32.c BUG_ON(tsk != current); current 1117 arch/powerpc/kernel/signal_32.c || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked) current 1160 arch/powerpc/kernel/signal_32.c current->restart_block.fn = do_no_restart_syscall; current 1244 arch/powerpc/kernel/signal_32.c current->comm, current->pid, current 1260 arch/powerpc/kernel/signal_32.c unsigned long new_dbcr0 = current->thread.debug.dbcr0; current 1275 arch/powerpc/kernel/signal_32.c current->thread.debug.dbcr1)) { current 1310 arch/powerpc/kernel/signal_32.c current->thread.debug.dbcr0 = new_dbcr0; current 1333 arch/powerpc/kernel/signal_32.c current->comm, current->pid, current 1373 arch/powerpc/kernel/signal_32.c BUG_ON(tsk != current); current 1467 arch/powerpc/kernel/signal_32.c current->restart_block.fn = do_no_restart_syscall; current 1515 arch/powerpc/kernel/signal_32.c current->comm, current->pid, current 113 arch/powerpc/kernel/signal_64.c BUG_ON(tsk != current); current 213 arch/powerpc/kernel/signal_64.c BUG_ON(tsk != current); current 335 arch/powerpc/kernel/signal_64.c BUG_ON(tsk != current); current 432 arch/powerpc/kernel/signal_64.c BUG_ON(tsk != current); current 506 arch/powerpc/kernel/signal_64.c current->thread.used_vr = true; current 665 arch/powerpc/kernel/signal_64.c || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0, current 668 arch/powerpc/kernel/signal_64.c ¤t->blocked, sizeof(sigset_t))) current 693 arch/powerpc/kernel/signal_64.c if (restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) current 716 arch/powerpc/kernel/signal_64.c current->restart_block.fn = do_no_restart_syscall; current 776 arch/powerpc/kernel/signal_64.c if (restore_tm_sigcontexts(current, &uc->uc_mcontext, current 794 arch/powerpc/kernel/signal_64.c current->thread.regs->msr &= ~MSR_TS_MASK; current 795 arch/powerpc/kernel/signal_64.c if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext)) current 808 arch/powerpc/kernel/signal_64.c current->comm, current->pid, "rt_sigreturn", current 827 arch/powerpc/kernel/signal_64.c BUG_ON(tsk != current); current 892 arch/powerpc/kernel/smp.c paca_ptrs[boot_cpuid]->__current = current; current 895 arch/powerpc/kernel/smp.c current_set[boot_cpuid] = current; current 1237 arch/powerpc/kernel/smp.c current->active_mm = &init_mm; current 62 arch/powerpc/kernel/stacktrace.c save_context_stack(trace, sp, current, 1); current 73 arch/powerpc/kernel/stacktrace.c if (tsk == current) current 87 arch/powerpc/kernel/stacktrace.c save_context_stack(trace, regs->gpr[1], current, 0); current 133 arch/powerpc/kernel/stacktrace.c if (tsk == current) current 20 arch/powerpc/kernel/swsusp.c flush_all_to_thread(current); current 31 arch/powerpc/kernel/swsusp.c switch_mmu_context(current->active_mm, current->active_mm, NULL); current 104 arch/powerpc/kernel/syscalls.c if (personality(current->personality) == PER_LINUX32 current 125 arch/powerpc/kernel/syscalls.c current->thread.regs->msr ^= MSR_LE; current 535 arch/powerpc/kernel/sysfs.c if (!current->thread.dscr_inherit) { current 536 arch/powerpc/kernel/sysfs.c current->thread.dscr = *(unsigned long *)val; current 955 arch/powerpc/kernel/trace/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 145 arch/powerpc/kernel/traps.c if (kexec_should_crash(current)) current 148 arch/powerpc/kernel/traps.c !current->pid || is_global_init(current)) current 230 arch/powerpc/kernel/traps.c if (kexec_should_crash(current)) current 242 arch/powerpc/kernel/traps.c if (in_interrupt() || panic_on_oops || !current->pid || current 243 arch/powerpc/kernel/traps.c is_global_init(current)) { current 319 arch/powerpc/kernel/traps.c if (!unhandled_signal(current, signr)) current 326 arch/powerpc/kernel/traps.c current->comm, current->pid, signame(signr), signr, current 349 arch/powerpc/kernel/traps.c current->thread.trap_nr = code; current 355 arch/powerpc/kernel/traps.c thread_pkey_regs_save(¤t->thread); current 588 arch/powerpc/kernel/traps.c #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC) current 589 arch/powerpc/kernel/traps.c #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC) current 896 arch/powerpc/kernel/traps.c smp_processor_id(), current->comm, current->pid, current 903 arch/powerpc/kernel/traps.c flush_vsx_to_thread(current); current 917 arch/powerpc/kernel/traps.c vdst = (u8 *)¤t->thread.vr_state.vr[t]; current 919 arch/powerpc/kernel/traps.c vdst = (u8 *)¤t->thread.fp_state.fpr[t][0]; current 931 arch/powerpc/kernel/traps.c smp_processor_id(), current->comm, current->pid, current 946 arch/powerpc/kernel/traps.c smp_processor_id(), current->comm, current->pid, current 953 arch/powerpc/kernel/traps.c smp_processor_id(), current->comm, current->pid, regs->nip, current 972 arch/powerpc/kernel/traps.c smp_processor_id(), current->comm, current->pid, current 1183 arch/powerpc/kernel/traps.c flush_fp_to_thread(current); current 1185 arch/powerpc/kernel/traps.c code = __parse_fpscr(current->thread.fp_state.fpscr); current 1414 arch/powerpc/kernel/traps.c current->thread.dscr = regs->gpr[rd]; current 1415 arch/powerpc/kernel/traps.c current->thread.dscr_inherit = 1; current 1416 arch/powerpc/kernel/traps.c mtspr(SPRN_DSCR, current->thread.dscr); current 1445 arch/powerpc/kernel/traps.c code = __parse_fpscr(current->thread.fp_state.fpscr); current 1605 arch/powerpc/kernel/traps.c if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) current 1634 arch/powerpc/kernel/traps.c current->comm, task_pid_nr(current), regs->gpr[1]); current 1689 arch/powerpc/kernel/traps.c current->thread.load_tm++; current 1692 arch/powerpc/kernel/traps.c tm_restore_sprs(¤t->thread); current 1770 arch/powerpc/kernel/traps.c current->thread.dscr = regs->gpr[rd]; current 1771 arch/powerpc/kernel/traps.c current->thread.dscr_inherit = 1; current 1772 arch/powerpc/kernel/traps.c current->thread.fscr |= FSCR_DSCR; current 1773 arch/powerpc/kernel/traps.c mtspr(SPRN_FSCR, current->thread.fscr); current 1845 arch/powerpc/kernel/traps.c current->thread.load_fp = 1; current 1850 arch/powerpc/kernel/traps.c tm_recheckpoint(¤t->thread); current 1863 arch/powerpc/kernel/traps.c current->thread.load_vec = 1; current 1864 arch/powerpc/kernel/traps.c tm_recheckpoint(¤t->thread); current 1865 arch/powerpc/kernel/traps.c current->thread.used_vr = 1; current 1881 arch/powerpc/kernel/traps.c current->thread.used_vsr = 1; current 1886 arch/powerpc/kernel/traps.c current->thread.load_vec = 1; current 1887 arch/powerpc/kernel/traps.c current->thread.load_fp = 1; current 1889 arch/powerpc/kernel/traps.c tm_recheckpoint(¤t->thread); current 1909 arch/powerpc/kernel/traps.c dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W); current 1911 arch/powerpc/kernel/traps.c current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; current 1917 arch/powerpc/kernel/traps.c dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W); current 1922 arch/powerpc/kernel/traps.c current->thread.debug.dbcr0 &= ~DBCR0_IAC1; current 1923 arch/powerpc/kernel/traps.c dbcr_iac_range(current) &= ~DBCR_IAC12MODE; current 1928 arch/powerpc/kernel/traps.c current->thread.debug.dbcr0 &= ~DBCR0_IAC2; current 1933 arch/powerpc/kernel/traps.c current->thread.debug.dbcr0 &= ~DBCR0_IAC3; current 1934 arch/powerpc/kernel/traps.c dbcr_iac_range(current) &= ~DBCR_IAC34MODE; current 1939 arch/powerpc/kernel/traps.c current->thread.debug.dbcr0 &= ~DBCR0_IAC4; current 1949 arch/powerpc/kernel/traps.c if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, current 1950 arch/powerpc/kernel/traps.c current->thread.debug.dbcr1)) current 1954 arch/powerpc/kernel/traps.c current->thread.debug.dbcr0 &= ~DBCR0_IDM; current 1957 arch/powerpc/kernel/traps.c mtspr(SPRN_DBCR0, current->thread.debug.dbcr0); current 1962 arch/powerpc/kernel/traps.c current->thread.debug.dbsr = debug_status; current 1979 arch/powerpc/kernel/traps.c current->thread.debug.dbcr0 &= ~DBCR0_BT; current 1980 arch/powerpc/kernel/traps.c current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; current 2014 arch/powerpc/kernel/traps.c current->thread.debug.dbcr0 &= ~DBCR0_IC; current 2015 arch/powerpc/kernel/traps.c if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0, current 2016 arch/powerpc/kernel/traps.c current->thread.debug.dbcr1)) current 2020 arch/powerpc/kernel/traps.c current->thread.debug.dbcr0 &= ~DBCR0_IDM; current 2049 arch/powerpc/kernel/traps.c flush_altivec_to_thread(current); current 2066 arch/powerpc/kernel/traps.c "in %s at %lx\n", current->comm, regs->nip); current 2067 arch/powerpc/kernel/traps.c current->thread.vr_state.vscr.u[3] |= 0x10000; current 2099 arch/powerpc/kernel/traps.c flush_spe_to_thread(current); current 2101 arch/powerpc/kernel/traps.c spefscr = current->thread.spefscr; current 2102 arch/powerpc/kernel/traps.c fpexc_mode = current->thread.fpexc_mode; current 2131 arch/powerpc/kernel/traps.c "in %s at %lx\n", current->comm, regs->nip); current 2150 arch/powerpc/kernel/traps.c giveup_spe(current); current 2167 arch/powerpc/kernel/traps.c "in %s at %lx\n", current->comm, regs->nip); current 2266 arch/powerpc/kernel/traps.c pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, current 53 arch/powerpc/kernel/uprobes.c struct arch_uprobe_task *autask = ¤t->utask->autask; current 55 arch/powerpc/kernel/uprobes.c autask->saved_trap_nr = current->thread.trap_nr; current 56 arch/powerpc/kernel/uprobes.c current->thread.trap_nr = UPROBE_TRAP_NR; current 57 arch/powerpc/kernel/uprobes.c regs->nip = current->utask->xol_vaddr; current 59 arch/powerpc/kernel/uprobes.c user_enable_single_step(current); current 101 arch/powerpc/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 103 arch/powerpc/kernel/uprobes.c WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); current 105 arch/powerpc/kernel/uprobes.c current->thread.trap_nr = utask->autask.saved_trap_nr; current 116 arch/powerpc/kernel/uprobes.c user_disable_single_step(current); current 156 arch/powerpc/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 158 arch/powerpc/kernel/uprobes.c current->thread.trap_nr = utask->autask.saved_trap_nr; current 161 arch/powerpc/kernel/uprobes.c user_disable_single_step(current); current 128 arch/powerpc/kernel/vdso.c struct mm_struct *mm = current->mm; current 158 arch/powerpc/kernel/vdso.c current->mm->context.vdso_base = 0; current 193 arch/powerpc/kernel/vdso.c current->mm->context.vdso_base = vdso_base; current 210 arch/powerpc/kernel/vdso.c current->mm->context.vdso_base = 0; current 276 arch/powerpc/kernel/vecemu.c vrs = current->thread.vr_state.vr; current 325 arch/powerpc/kernel/vecemu.c ¤t->thread.vr_state.vscr.u[3]); current 330 arch/powerpc/kernel/vecemu.c ¤t->thread.vr_state.vscr.u[3]); current 122 arch/powerpc/kernel/watchdog.c print_irqtrace_events(current); current 212 arch/powerpc/kernel/watchdog.c print_irqtrace_events(current); current 278 arch/powerpc/kernel/watchdog.c print_irqtrace_events(current); current 299 arch/powerpc/kvm/book3s_64_mmu_hv.c current->mm->pgd, false, pte_idx_ret); current 595 arch/powerpc/kvm/book3s_64_mmu_hv.c down_read(¤t->mm->mmap_sem); current 596 arch/powerpc/kvm/book3s_64_mmu_hv.c vma = find_vma(current->mm, hva); current 605 arch/powerpc/kvm/book3s_64_mmu_hv.c up_read(¤t->mm->mmap_sem); current 624 arch/powerpc/kvm/book3s_64_mmu_hv.c ptep = find_current_mm_pte(current->mm->pgd, current 258 arch/powerpc/kvm/book3s_64_vio.c account_locked_vm(current->mm, current 283 arch/powerpc/kvm/book3s_64_vio.c ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true); current 329 arch/powerpc/kvm/book3s_64_vio.c account_locked_vm(current->mm, kvmppc_stt_pages(npages), false); current 3925 arch/powerpc/kvm/book3s_hv.c vcpu->arch.run_task = current; current 3938 arch/powerpc/kvm/book3s_hv.c if (!signal_pending(current)) { current 3952 arch/powerpc/kvm/book3s_hv.c !signal_pending(current)) { current 4054 arch/powerpc/kvm/book3s_hv.c vcpu->arch.run_task = current; current 4083 arch/powerpc/kvm/book3s_hv.c if (signal_pending(current)) current 4173 arch/powerpc/kvm/book3s_hv.c r = kvmppc_handle_exit_hv(kvm_run, vcpu, current); current 4183 arch/powerpc/kvm/book3s_hv.c if (signal_pending(current)) { current 4236 arch/powerpc/kvm/book3s_hv.c if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && current 4237 arch/powerpc/kvm/book3s_hv.c (current->thread.regs->msr & MSR_TM)) { current 4238 arch/powerpc/kvm/book3s_hv.c if (MSR_TM_ACTIVE(current->thread.regs->msr)) { current 4245 arch/powerpc/kvm/book3s_hv.c current->thread.tm_tfhar = mfspr(SPRN_TFHAR); current 4246 arch/powerpc/kvm/book3s_hv.c current->thread.tm_tfiar = mfspr(SPRN_TFIAR); current 4247 arch/powerpc/kvm/book3s_hv.c current->thread.tm_texasr = mfspr(SPRN_TEXASR); current 4248 arch/powerpc/kvm/book3s_hv.c current->thread.regs->msr &= ~MSR_TM; current 4264 arch/powerpc/kvm/book3s_hv.c if (signal_pending(current)) { current 4274 arch/powerpc/kvm/book3s_hv.c flush_all_to_thread(current); current 4286 arch/powerpc/kvm/book3s_hv.c vcpu->arch.pgdir = current->mm->pgd; current 4330 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_FSCR, current->thread.fscr); current 4618 arch/powerpc/kvm/book3s_hv.c down_read(¤t->mm->mmap_sem); current 4619 arch/powerpc/kvm/book3s_hv.c vma = find_vma(current->mm, hva); current 4625 arch/powerpc/kvm/book3s_hv.c up_read(¤t->mm->mmap_sem); current 4658 arch/powerpc/kvm/book3s_hv.c up_read(¤t->mm->mmap_sem); current 4921 arch/powerpc/kvm/book3s_hv.c snprintf(buf, sizeof(buf), "vm%d", current->pid); current 112 arch/powerpc/kvm/book3s_pr.c current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; current 786 arch/powerpc/kvm/book3s_pr.c struct thread_struct *t = ¤t->thread; current 810 arch/powerpc/kvm/book3s_pr.c giveup_fpu(current); current 816 arch/powerpc/kvm/book3s_pr.c if (current->thread.regs->msr & MSR_VEC) current 817 arch/powerpc/kvm/book3s_pr.c giveup_altivec(current); current 838 arch/powerpc/kvm/book3s_pr.c mtspr(SPRN_TAR, current->thread.tar); current 849 arch/powerpc/kvm/book3s_pr.c struct thread_struct *t = ¤t->thread; current 921 arch/powerpc/kvm/book3s_pr.c lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr; current 941 arch/powerpc/kvm/book3s_pr.c current->thread.regs->msr |= lost_ext; current 999 arch/powerpc/kvm/book3s_pr.c current->thread.tar = mfspr(SPRN_TAR); current 1833 arch/powerpc/kvm/book3s_pr.c giveup_all(current); current 135 arch/powerpc/kvm/booke.c if (!(current->thread.regs->msr & MSR_FP)) { current 139 arch/powerpc/kvm/booke.c current->thread.fp_save_area = &vcpu->arch.fp; current 140 arch/powerpc/kvm/booke.c current->thread.regs->msr |= MSR_FP; current 152 arch/powerpc/kvm/booke.c if (current->thread.regs->msr & MSR_FP) current 153 arch/powerpc/kvm/booke.c giveup_fpu(current); current 154 arch/powerpc/kvm/booke.c current->thread.fp_save_area = NULL; current 177 arch/powerpc/kvm/booke.c if (!(current->thread.regs->msr & MSR_VEC)) { current 181 arch/powerpc/kvm/booke.c current->thread.vr_save_area = &vcpu->arch.vr; current 182 arch/powerpc/kvm/booke.c current->thread.regs->msr |= MSR_VEC; current 196 arch/powerpc/kvm/booke.c if (current->thread.regs->msr & MSR_VEC) current 197 arch/powerpc/kvm/booke.c giveup_altivec(current); current 198 arch/powerpc/kvm/booke.c current->thread.vr_save_area = NULL; current 775 arch/powerpc/kvm/booke.c debug = current->thread.debug; current 776 arch/powerpc/kvm/booke.c current->thread.debug = vcpu->arch.dbg_reg; current 778 arch/powerpc/kvm/booke.c vcpu->arch.pgdir = current->mm->pgd; current 788 arch/powerpc/kvm/booke.c current->thread.debug = debug; current 2095 arch/powerpc/kvm/booke.c current->thread.kvm_vcpu = vcpu; current 2100 arch/powerpc/kvm/booke.c current->thread.kvm_vcpu = NULL; current 373 arch/powerpc/kvm/booke_emulate.c current->thread.debug = vcpu->arch.dbg_reg; current 358 arch/powerpc/kvm/e500_mmu_host.c down_read(¤t->mm->mmap_sem); current 360 arch/powerpc/kvm/e500_mmu_host.c vma = find_vma(current->mm, hva); current 444 arch/powerpc/kvm/e500_mmu_host.c up_read(¤t->mm->mmap_sem); current 117 arch/powerpc/kvm/mpic.c struct kvm_vcpu *vcpu = current->thread.kvm_vcpu; current 92 arch/powerpc/kvm/powerpc.c if (signal_pending(current)) { current 213 arch/powerpc/kvm/timing.c current->pid, id); current 402 arch/powerpc/kvm/trace_hv.h __entry->tgid = current->tgid; current 426 arch/powerpc/kvm/trace_hv.h __entry->tgid = current->tgid; current 448 arch/powerpc/kvm/trace_hv.h __entry->tgid = current->tgid; current 468 arch/powerpc/kvm/trace_hv.h __entry->tgid = current->tgid; current 495 arch/powerpc/lib/sstep.c current->thread.TS_FPR(rn) = u.l[0]; current 502 arch/powerpc/lib/sstep.c current->thread.TS_FPR(rn) = u.l[1]; current 529 arch/powerpc/lib/sstep.c u.l[0] = current->thread.TS_FPR(rn); current 541 arch/powerpc/lib/sstep.c u.l[1] = current->thread.TS_FPR(rn); current 579 arch/powerpc/lib/sstep.c current->thread.vr_state.vr[rn] = u.v; current 602 arch/powerpc/lib/sstep.c u.v = current->thread.vr_state.vr[rn]; current 835 arch/powerpc/lib/sstep.c current->thread.fp_state.fpr[reg][0] = buf.d[0]; current 836 arch/powerpc/lib/sstep.c current->thread.fp_state.fpr[reg][1] = buf.d[1]; current 842 arch/powerpc/lib/sstep.c current->thread.vr_state.vr[reg - 32] = buf.v; current 866 arch/powerpc/lib/sstep.c buf.d[0] = current->thread.fp_state.fpr[reg][0]; current 867 arch/powerpc/lib/sstep.c buf.d[1] = current->thread.fp_state.fpr[reg][1]; current 873 arch/powerpc/lib/sstep.c buf.v = current->thread.vr_state.vr[reg - 32]; current 2708 arch/powerpc/lib/sstep.c if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) { current 332 arch/powerpc/math-emu/math.c op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); current 333 arch/powerpc/math-emu/math.c op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); current 334 arch/powerpc/math-emu/math.c op2 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); current 338 arch/powerpc/math-emu/math.c op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); current 339 arch/powerpc/math-emu/math.c op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); current 340 arch/powerpc/math-emu/math.c op2 = (void *)¤t->thread.TS_FPR((insn >> 6) & 0x1f); current 344 arch/powerpc/math-emu/math.c op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); current 345 arch/powerpc/math-emu/math.c op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); current 346 arch/powerpc/math-emu/math.c op2 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); current 347 arch/powerpc/math-emu/math.c op3 = (void *)¤t->thread.TS_FPR((insn >> 6) & 0x1f); current 353 arch/powerpc/math-emu/math.c op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); current 363 arch/powerpc/math-emu/math.c op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); current 368 arch/powerpc/math-emu/math.c op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); current 372 arch/powerpc/math-emu/math.c op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); current 373 arch/powerpc/math-emu/math.c op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); current 377 arch/powerpc/math-emu/math.c op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); current 378 arch/powerpc/math-emu/math.c op1 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); current 383 arch/powerpc/math-emu/math.c op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); current 392 arch/powerpc/math-emu/math.c op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); current 400 arch/powerpc/math-emu/math.c op2 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); current 401 arch/powerpc/math-emu/math.c op3 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); current 421 arch/powerpc/math-emu/math.c op1 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); current 433 arch/powerpc/math-emu/math.c flush_fp_to_thread(current); current 199 arch/powerpc/math-emu/math_efp.c vc.wp[0] = current->thread.evr[fc]; current 201 arch/powerpc/math-emu/math_efp.c va.wp[0] = current->thread.evr[fa]; current 203 arch/powerpc/math-emu/math_efp.c vb.wp[0] = current->thread.evr[fb]; current 676 arch/powerpc/math-emu/math_efp.c &= ~(FP_EX_INVALID | FP_EX_UNDERFLOW) | current->thread.spefscr_last; current 679 arch/powerpc/math-emu/math_efp.c current->thread.spefscr_last = __FPU_FPSCR; current 681 arch/powerpc/math-emu/math_efp.c current->thread.evr[fc] = vc.wp[0]; current 691 arch/powerpc/math-emu/math_efp.c if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) { current 693 arch/powerpc/math-emu/math_efp.c && (current->thread.fpexc_mode & PR_FP_EXC_DIV)) current 696 arch/powerpc/math-emu/math_efp.c && (current->thread.fpexc_mode & PR_FP_EXC_OVF)) current 699 arch/powerpc/math-emu/math_efp.c && (current->thread.fpexc_mode & PR_FP_EXC_UND)) current 702 arch/powerpc/math-emu/math_efp.c && (current->thread.fpexc_mode & PR_FP_EXC_RES)) current 705 arch/powerpc/math-emu/math_efp.c && (current->thread.fpexc_mode & PR_FP_EXC_INV)) current 752 arch/powerpc/math-emu/math_efp.c s_hi = current->thread.evr[fc] & SIGN_BIT_S; current 753 arch/powerpc/math-emu/math_efp.c fgpr.wp[0] = current->thread.evr[fc]; current 798 arch/powerpc/math-emu/math_efp.c s_hi = current->thread.evr[fb] & SIGN_BIT_S; current 807 arch/powerpc/math-emu/math_efp.c s_hi = current->thread.evr[fb] & SIGN_BIT_S; current 879 arch/powerpc/math-emu/math_efp.c current->thread.evr[fc] = fgpr.wp[0]; current 884 arch/powerpc/math-emu/math_efp.c if (current->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) current 885 arch/powerpc/math-emu/math_efp.c return (current->thread.fpexc_mode & PR_FP_EXC_RES) ? 1 : 0; current 334 arch/powerpc/mm/book3s32/mmu.c if (!current->thread.regs) current 338 arch/powerpc/mm/book3s32/mmu.c if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400) current 1148 arch/powerpc/mm/book3s64/hash_utils.c if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) { current 1215 arch/powerpc/mm/book3s64/hash_utils.c ea, access, current->comm); current 1363 arch/powerpc/mm/book3s64/hash_utils.c if (current->mm == mm) current 1407 arch/powerpc/mm/book3s64/hash_utils.c if (current->mm == mm) current 1450 arch/powerpc/mm/book3s64/hash_utils.c struct mm_struct *mm = current->mm; current 1468 arch/powerpc/mm/book3s64/hash_utils.c struct mm_struct *mm = current->mm; current 1638 arch/powerpc/mm/book3s64/hash_utils.c trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL; current 1688 arch/powerpc/mm/book3s64/hash_utils.c if (local && cpu_has_feature(CPU_FTR_TM) && current->thread.regs && current 1689 arch/powerpc/mm/book3s64/hash_utils.c MSR_TM_ACTIVE(current->thread.regs->msr)) { current 119 arch/powerpc/mm/book3s64/mmu_context.c memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)); current 122 arch/powerpc/mm/book3s64/mmu_context.c if (current->mm->context.hash_context->spt) { current 396 arch/powerpc/mm/book3s64/pkeys.c if (!current->mm) current 400 arch/powerpc/mm/book3s64/pkeys.c if (current->mm != vma->vm_mm) current 50 arch/powerpc/mm/book3s64/radix_hugetlbpage.c struct mm_struct *mm = current->mm; current 619 arch/powerpc/mm/book3s64/radix_tlb.c if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) current 642 arch/powerpc/mm/book3s64/radix_tlb.c if (current->mm == mm) current 645 arch/powerpc/mm/book3s64/radix_tlb.c if (current->active_mm == mm) { current 649 arch/powerpc/mm/book3s64/radix_tlb.c BUG_ON(current->mm); current 651 arch/powerpc/mm/book3s64/radix_tlb.c switch_mm(mm, &init_mm, current); current 652 arch/powerpc/mm/book3s64/radix_tlb.c current->active_mm = &init_mm; current 318 arch/powerpc/mm/book3s64/slb.c struct mm_struct *mm = current->mm; current 368 arch/powerpc/mm/book3s64/slb.c struct mm_struct *mm = current->mm; current 802 arch/powerpc/mm/book3s64/slb.c struct mm_struct *mm = current->mm; current 88 arch/powerpc/mm/book3s64/subpage_prot.c struct mm_struct *mm = current->mm; current 191 arch/powerpc/mm/book3s64/subpage_prot.c struct mm_struct *mm = current->mm; current 80 arch/powerpc/mm/copro_fault.c current->maj_flt++; current 82 arch/powerpc/mm/copro_fault.c current->min_flt++; current 105 arch/powerpc/mm/fault.c struct mm_struct *mm = current->mm; current 148 arch/powerpc/mm/fault.c current->thread.trap_nr = BUS_ADRERR; current 154 arch/powerpc/mm/fault.c current->comm, current->pid, address); current 177 arch/powerpc/mm/fault.c if (fatal_signal_pending(current) && !user_mode(regs)) current 260 arch/powerpc/mm/fault.c struct pt_regs *uregs = current->thread.regs; current 361 arch/powerpc/mm/fault.c current->comm, current->pid, address, current 440 arch/powerpc/mm/fault.c struct mm_struct *mm = current->mm; current 602 arch/powerpc/mm/fault.c if (!fatal_signal_pending(current)) current 613 arch/powerpc/mm/fault.c up_read(¤t->mm->mmap_sem); current 622 arch/powerpc/mm/fault.c current->maj_flt++; current 626 arch/powerpc/mm/fault.c current->min_flt++; current 685 arch/powerpc/mm/fault.c if (task_stack_end_corrupted(current)) current 30 arch/powerpc/mm/mmap.c if (current->personality & ADDR_COMPAT_LAYOUT) current 55 arch/powerpc/mm/mmap.c if (!(current->flags & PF_RANDOMIZE)) current 94 arch/powerpc/mm/mmap.c struct mm_struct *mm = current->mm; current 138 arch/powerpc/mm/mmap.c struct mm_struct *mm = current->mm; current 211 arch/powerpc/mm/mmap.c if (current->flags & PF_RANDOMIZE) current 33 arch/powerpc/mm/pgtable.c return current->thread.regs && TRAP(current->thread.regs) == 0x400; current 176 arch/powerpc/mm/slice.c if (mm != current->active_mm) current 179 arch/powerpc/mm/slice.c copy_mm_to_paca(current->active_mm); current 440 arch/powerpc/mm/slice.c struct mm_struct *mm = current->mm; current 649 arch/powerpc/mm/slice.c mm_ctx_user_psize(¤t->mm->context), 0); current 659 arch/powerpc/mm/slice.c mm_ctx_user_psize(¤t->mm->context), 1); current 718 arch/powerpc/mm/slice.c struct mm_struct *mm = current->mm; current 75 arch/powerpc/oprofile/backtrace.c if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) current 33 arch/powerpc/perf/callchain.c if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) current 59 arch/powerpc/perf/callchain.c if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) current 122 arch/powerpc/perf/callchain.c pgdir = current->mm->pgd; current 209 arch/powerpc/perf/callchain.c if (vdso64_rt_sigtramp && current->mm->context.vdso_base && current 210 arch/powerpc/perf/callchain.c nip == current->mm->context.vdso_base + vdso64_rt_sigtramp) current 294 arch/powerpc/perf/callchain.c return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT); current 368 arch/powerpc/perf/callchain.c if (vdso32_sigtramp && current->mm->context.vdso_base && current 369 arch/powerpc/perf/callchain.c nip == current->mm->context.vdso_base + vdso32_sigtramp) current 379 arch/powerpc/perf/callchain.c if (vdso32_rt_sigtramp && current->mm->context.vdso_base && current 380 arch/powerpc/perf/callchain.c nip == current->mm->context.vdso_base + vdso32_rt_sigtramp) current 571 arch/powerpc/perf/core-book3s.c if (!is_ebb_event(event) || current->thread.used_ebb) current 580 arch/powerpc/perf/core-book3s.c current->thread.used_ebb = 1; current 581 arch/powerpc/perf/core-book3s.c current->thread.mmcr0 |= MMCR0_PMXE; current 589 arch/powerpc/perf/core-book3s.c current->thread.siar = mfspr(SPRN_SIAR); current 590 arch/powerpc/perf/core-book3s.c current->thread.sier = mfspr(SPRN_SIER); current 591 arch/powerpc/perf/core-book3s.c current->thread.sdar = mfspr(SPRN_SDAR); current 592 arch/powerpc/perf/core-book3s.c current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK; current 593 arch/powerpc/perf/core-book3s.c current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK; current 611 arch/powerpc/perf/core-book3s.c mmcr0 |= current->thread.mmcr0; current 618 arch/powerpc/perf/core-book3s.c if (!(current->thread.mmcr0 & MMCR0_PMXE)) current 621 arch/powerpc/perf/core-book3s.c mtspr(SPRN_SIAR, current->thread.siar); current 622 arch/powerpc/perf/core-book3s.c mtspr(SPRN_SIER, current->thread.sier); current 623 arch/powerpc/perf/core-book3s.c mtspr(SPRN_SDAR, current->thread.sdar); current 632 arch/powerpc/perf/core-book3s.c mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2); current 677 arch/powerpc/perf/core-book3s.c if ((current->thread.mmcr0 & (MMCR0_PMAO | MMCR0_PMAO_SYNC)) != MMCR0_PMAO) current 681 arch/powerpc/perf/core-book3s.c if (ebb && !(current->thread.bescr & BESCR_GE)) current 112 arch/powerpc/perf/perf_regs.c regs_user->regs = task_pt_regs(current); current 113 arch/powerpc/perf/perf_regs.c regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) : current 269 arch/powerpc/platforms/83xx/suspend.c if (signal_pending(current) || pci_pm_state < 2) current 49 arch/powerpc/platforms/cell/spufs/context.c ctx->owner = get_task_mm(current); current 86 arch/powerpc/platforms/cell/spufs/coredump.c int n = iterate_fd(current->files, *fd, match_context, NULL); current 128 arch/powerpc/platforms/cell/spufs/fault.c ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt); current 317 arch/powerpc/platforms/cell/spufs/file.c if (fatal_signal_pending(current)) current 339 arch/powerpc/platforms/cell/spufs/file.c up_read(¤t->mm->mmap_sem); current 343 arch/powerpc/platforms/cell/spufs/file.c down_read(¤t->mm->mmap_sem); current 1456 arch/powerpc/platforms/cell/spufs/file.c if (ctx->owner != current->mm) current 259 arch/powerpc/platforms/cell/spufs/run.c if (signal_pending(current)) current 415 arch/powerpc/platforms/cell/spufs/run.c if (signal_pending(current)) current 109 arch/powerpc/platforms/cell/spufs/sched.c ctx->tid = current->pid; current 117 arch/powerpc/platforms/cell/spufs/sched.c if (rt_prio(current->prio)) current 118 arch/powerpc/platforms/cell/spufs/sched.c ctx->prio = current->prio; current 120 arch/powerpc/platforms/cell/spufs/sched.c ctx->prio = current->static_prio; current 121 arch/powerpc/platforms/cell/spufs/sched.c ctx->policy = current->policy; current 131 arch/powerpc/platforms/cell/spufs/sched.c cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr); current 229 arch/powerpc/platforms/cell/spufs/sched.c spu->pid = current->pid; current 230 arch/powerpc/platforms/cell/spufs/sched.c spu->tgid = current->tgid; current 545 arch/powerpc/platforms/cell/spufs/sched.c if (!signal_pending(current)) { current 788 arch/powerpc/platforms/cell/spufs/sched.c if (signal_pending(current)) current 1080 arch/powerpc/platforms/cell/spufs/sched.c idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); current 314 arch/powerpc/platforms/cell/spufs/spufs.h if (signal_pending(current)) { \ current 30 arch/powerpc/platforms/chrp/nvram.c current->comm, addr, nvram_size); current 51 arch/powerpc/platforms/chrp/nvram.c current->comm, addr, nvram_size); current 170 arch/powerpc/platforms/powernv/smp.c current->active_mm = NULL; /* for sanity */ current 854 arch/powerpc/platforms/powernv/vas-window.c trace_vas_rx_win_open(current, vasid, cop, rxattr); current 878 arch/powerpc/platforms/powernv/vas-window.c rxwin->pid = task_pid_vnr(current); current 984 arch/powerpc/platforms/powernv/vas-window.c trace_vas_tx_win_open(current, vasid, cop, attr); current 1078 arch/powerpc/platforms/powernv/vas-window.c trace_vas_paste_crb(current, txwin); current 1708 arch/powerpc/xmon/xmon.c printf(" current = 0x%px\n", current); current 1713 arch/powerpc/xmon/xmon.c if (current) { current 1715 arch/powerpc/xmon/xmon.c current->pid, current->comm); current 62 arch/riscv/include/asm/elf.h (elf_addr_t)current->mm->context.vdso); \ current 136 arch/riscv/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 42 arch/riscv/kernel/perf_regs.c regs_user->regs = task_pt_regs(current); current 43 arch/riscv/kernel/perf_regs.c regs_user->abi = perf_reg_abi(current); current 75 arch/riscv/kernel/process.c fstate_restore(current, regs); current 90 arch/riscv/kernel/process.c fstate_off(current, task_pt_regs(current)); current 91 arch/riscv/kernel/process.c memset(¤t->thread.fstate, 0, sizeof(current->thread.fstate)); current 155 arch/riscv/kernel/ptrace.c syscall_set_nr(current, regs, -1); current 159 arch/riscv/kernel/ptrace.c trace_sys_enter(regs, syscall_get_nr(current, regs)); current 35 arch/riscv/kernel/signal.c err = __copy_from_user(¤t->thread.fstate, state, sizeof(*state)); current 39 arch/riscv/kernel/signal.c fstate_restore(current, regs); current 62 arch/riscv/kernel/signal.c fstate_save(current, regs); current 63 arch/riscv/kernel/signal.c err = __copy_to_user(state, ¤t->thread.fstate, sizeof(*state)); current 101 arch/riscv/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 122 arch/riscv/kernel/signal.c task = current; current 193 arch/riscv/kernel/signal.c current->mm->context.vdso, rt_sigreturn); current 210 arch/riscv/kernel/signal.c current->comm, task_pid_nr(current), ksig->sig, current 142 arch/riscv/kernel/smpboot.c current->active_mm = mm; current 31 arch/riscv/kernel/stacktrace.c } else if (task == NULL || task == current) { current 59 arch/riscv/kernel/stacktrace.c pc = ftrace_graph_ret_addr(current, NULL, frame->ra, current 75 arch/riscv/kernel/stacktrace.c } else if (task == NULL || task == current) { current 126 arch/riscv/kernel/stacktrace.c if (likely(task && task != current && task->state != TASK_RUNNING)) current 64 arch/riscv/kernel/sys_riscv.c flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL); current 61 arch/riscv/kernel/traps.c struct task_struct *tsk = current; current 58 arch/riscv/kernel/vdso.c struct mm_struct *mm = current->mm; current 50 arch/riscv/mm/cacheflush.c if (mm != current->active_mm || !local) { current 40 arch/riscv/mm/fault.c tsk = current; current 523 arch/s390/crypto/prng.c if (signal_pending(current)) { current 606 arch/s390/crypto/prng.c if (signal_pending(current)) { current 197 arch/s390/include/asm/compat.h stack = KSTK_ESP(current); current 176 arch/s390/include/asm/elf.h !current->mm->context.alloc_pgste) { \ current 178 arch/s390/include/asm/elf.h set_pt_regs_flag(task_pt_regs(current), \ current 235 arch/s390/include/asm/elf.h (current->personality & (~PER_MASK))); \ current 236 arch/s390/include/asm/elf.h current->thread.sys_call_table = \ current 242 arch/s390/include/asm/elf.h if (personality(current->personality) != PER_LINUX32) \ current 244 arch/s390/include/asm/elf.h (current->personality & ~PER_MASK)); \ current 247 arch/s390/include/asm/elf.h current->thread.sys_call_table = \ current 251 arch/s390/include/asm/elf.h current->thread.sys_call_table = \ current 276 arch/s390/include/asm/elf.h (unsigned long)current->mm->context.vdso_base); \ current 47 arch/s390/include/asm/kprobes.h (((unsigned long)task_stack_page(current)) + THREAD_SIZE - (ADDR))) \ current 49 arch/s390/include/asm/kprobes.h : (((unsigned long)task_stack_page(current)) + THREAD_SIZE - (ADDR))) current 32 arch/s390/include/asm/mmu_context.h (current->mm && current->mm->context.alloc_pgste); current 111 arch/s390/include/asm/mmu_context.h struct task_struct *tsk = current; current 122 arch/s390/include/asm/mmu_context.h set_fs(current->thread.mm_segment); current 131 arch/s390/include/asm/mmu_context.h switch_mm(prev, next, current); current 1283 arch/s390/include/asm/pgtable.h return end <= current->mm->context.asce_limit; current 99 arch/s390/include/asm/processor.h #define TASK_SIZE TASK_SIZE_OF(current) current 182 arch/s390/include/asm/processor.h crst_table_downgrade(current->mm); \ current 41 arch/s390/include/asm/stacktrace.h if (task == current) current 34 arch/s390/include/asm/uaccess.h #define get_fs() (current->thread.mm_segment) current 56 arch/s390/kernel/compat_signal.c save_access_regs(current->thread.acrs); current 63 arch/s390/kernel/compat_signal.c restore_access_regs(current->thread.acrs); current 78 arch/s390/kernel/compat_signal.c memcpy(&user_sregs.regs.acrs, current->thread.acrs, current 80 arch/s390/kernel/compat_signal.c fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, ¤t->thread.fpu); current 92 arch/s390/kernel/compat_signal.c current->restart_block.fn = do_no_restart_syscall; current 97 arch/s390/kernel/compat_signal.c if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI)) current 116 arch/s390/kernel/compat_signal.c memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, current 117 arch/s390/kernel/compat_signal.c sizeof(current->thread.acrs)); current 118 arch/s390/kernel/compat_signal.c fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, ¤t->thread.fpu); current 141 arch/s390/kernel/compat_signal.c vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); current 145 arch/s390/kernel/compat_signal.c current->thread.fpu.vxrs + __NUM_VXRS_LOW, current 170 arch/s390/kernel/compat_signal.c __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, current 175 arch/s390/kernel/compat_signal.c *((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i]; current 182 arch/s390/kernel/compat_signal.c struct pt_regs *regs = task_pt_regs(current); current 203 arch/s390/kernel/compat_signal.c struct pt_regs *regs = task_pt_regs(current); current 247 arch/s390/kernel/compat_signal.c sp = current->sas_ss_sp + current->sas_ss_size; current 332 arch/s390/kernel/compat_signal.c regs->gprs[6] = current->thread.last_break; current 407 arch/s390/kernel/compat_signal.c regs->gprs[5] = current->thread.last_break; current 96 arch/s390/kernel/dumpstack.c task = task ? : current; current 102 arch/s390/kernel/dumpstack.c if (task != current) current 132 arch/s390/kernel/dumpstack.c task = current; current 137 arch/s390/kernel/dumpstack.c debug_show_held_locks(task ? : current); current 209 arch/s390/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 25 arch/s390/kernel/guarded_storage.c if (!current->thread.gs_cb) { current 33 arch/s390/kernel/guarded_storage.c current->thread.gs_cb = gs_cb; current 41 arch/s390/kernel/guarded_storage.c if (current->thread.gs_cb) { current 43 arch/s390/kernel/guarded_storage.c kfree(current->thread.gs_cb); current 44 arch/s390/kernel/guarded_storage.c current->thread.gs_cb = NULL; current 55 arch/s390/kernel/guarded_storage.c gs_cb = current->thread.gs_bc_cb; current 60 arch/s390/kernel/guarded_storage.c current->thread.gs_bc_cb = gs_cb; current 71 arch/s390/kernel/guarded_storage.c gs_cb = current->thread.gs_bc_cb; current 72 arch/s390/kernel/guarded_storage.c current->thread.gs_bc_cb = NULL; current 83 arch/s390/kernel/guarded_storage.c gs_cb = current->thread.gs_bc_cb; current 85 arch/s390/kernel/guarded_storage.c kfree(current->thread.gs_cb); current 86 arch/s390/kernel/guarded_storage.c current->thread.gs_bc_cb = NULL; current 89 arch/s390/kernel/guarded_storage.c current->thread.gs_cb = gs_cb; current 99 arch/s390/kernel/guarded_storage.c for_each_thread(current, sibling) { current 375 arch/s390/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags); current 395 arch/s390/kernel/kprobes.c if (ri->task != current) current 414 arch/s390/kernel/kprobes.c if (ri->task != current) current 438 arch/s390/kernel/kprobes.c kretprobe_hash_unlock(current, &flags); current 181 arch/s390/kernel/nmi.c current->comm, current->pid); current 229 arch/s390/kernel/perf_event.c unwind_for_each_frame(&state, current, regs, 0) { current 23 arch/s390/kernel/perf_regs.c fp = MACHINE_HAS_VX ? *(freg_t *)(current->thread.fpu.vxrs + idx) current 24 arch/s390/kernel/perf_regs.c : current->thread.fpu.fprs[idx]; current 65 arch/s390/kernel/perf_regs.c regs_user->regs = task_pt_regs(current); current 68 arch/s390/kernel/perf_regs.c regs_user->abi = perf_reg_abi(current); current 55 arch/s390/kernel/process.c if (S390_lowcore.current_pid != current->pid) { current 56 arch/s390/kernel/process.c S390_lowcore.current_pid = current->pid; current 158 arch/s390/kernel/process.c current->thread.fpu.fpc = 0; current 168 arch/s390/kernel/process.c fpregs->fpc = current->thread.fpu.fpc; current 172 arch/s390/kernel/process.c current->thread.fpu.vxrs); current 174 arch/s390/kernel/process.c memcpy(&fpregs->fprs, current->thread.fpu.fprs, current 186 arch/s390/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p)) current 216 arch/s390/kernel/process.c if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current 89 arch/s390/kernel/processor.c current->active_mm = &init_mm; current 90 arch/s390/kernel/processor.c BUG_ON(current->mm); current 91 arch/s390/kernel/processor.c enter_lazy_tlb(&init_mm, current); current 897 arch/s390/kernel/ptrace.c if (target == current) current 926 arch/s390/kernel/ptrace.c if (target == current) current 949 arch/s390/kernel/ptrace.c if (rc == 0 && target == current) current 961 arch/s390/kernel/ptrace.c if (target == current) current 979 arch/s390/kernel/ptrace.c if (target == current) current 1071 arch/s390/kernel/ptrace.c if (target == current) current 1088 arch/s390/kernel/ptrace.c if (target == current) current 1111 arch/s390/kernel/ptrace.c if (target == current) current 1127 arch/s390/kernel/ptrace.c if (target == current) current 1166 arch/s390/kernel/ptrace.c if (target == current) current 1189 arch/s390/kernel/ptrace.c else if (target == current) current 1203 arch/s390/kernel/ptrace.c if (target == current) { current 1306 arch/s390/kernel/ptrace.c if (target == current) current 1328 arch/s390/kernel/ptrace.c if (target == current) current 1431 arch/s390/kernel/ptrace.c if (target == current) current 1460 arch/s390/kernel/ptrace.c if (target == current) current 1483 arch/s390/kernel/ptrace.c if (rc == 0 && target == current) current 33 arch/s390/kernel/runtime_instr.c struct task_struct *task = current; current 85 arch/s390/kernel/runtime_instr.c if (!current->thread.ri_cb) { current 90 arch/s390/kernel/runtime_instr.c cb = current->thread.ri_cb; current 98 arch/s390/kernel/runtime_instr.c current->thread.ri_cb = cb; current 365 arch/s390/kernel/setup.c current->stack = (void *) stack; current 367 arch/s390/kernel/setup.c current->stack_vm_area = (void *) stack; current 369 arch/s390/kernel/setup.c set_task_stack_end_magic(current); current 109 arch/s390/kernel/signal.c save_access_regs(current->thread.acrs); current 116 arch/s390/kernel/signal.c restore_access_regs(current->thread.acrs); current 130 arch/s390/kernel/signal.c memcpy(&user_sregs.regs.acrs, current->thread.acrs, current 132 arch/s390/kernel/signal.c fpregs_store(&user_sregs.fpregs, ¤t->thread.fpu); current 143 arch/s390/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 148 arch/s390/kernel/signal.c if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI)) current 167 arch/s390/kernel/signal.c memcpy(¤t->thread.acrs, &user_sregs.regs.acrs, current 168 arch/s390/kernel/signal.c sizeof(current->thread.acrs)); current 170 arch/s390/kernel/signal.c fpregs_load(&user_sregs.fpregs, ¤t->thread.fpu); current 186 arch/s390/kernel/signal.c vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); current 190 arch/s390/kernel/signal.c current->thread.fpu.vxrs + __NUM_VXRS_LOW, current 207 arch/s390/kernel/signal.c __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, current 212 arch/s390/kernel/signal.c *((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i]; current 219 arch/s390/kernel/signal.c struct pt_regs *regs = task_pt_regs(current); current 241 arch/s390/kernel/signal.c struct pt_regs *regs = task_pt_regs(current); current 281 arch/s390/kernel/signal.c sp = current->sas_ss_sp + current->sas_ss_size; current 364 arch/s390/kernel/signal.c regs->gprs[6] = current->thread.last_break; current 435 arch/s390/kernel/signal.c regs->gprs[5] = current->thread.last_break; current 472 arch/s390/kernel/signal.c current->thread.system_call = current 477 arch/s390/kernel/signal.c if (current->thread.system_call) { current 478 arch/s390/kernel/signal.c regs->int_code = current->thread.system_call; current 511 arch/s390/kernel/signal.c if (current->thread.system_call) { current 512 arch/s390/kernel/signal.c regs->int_code = current->thread.system_call; current 84 arch/s390/kernel/sys_s390.c unsigned int ret = current->personality; current 86 arch/s390/kernel/sys_s390.c if (personality(current->personality) == PER_LINUX32 && current 34 arch/s390/kernel/traps.c address = *(unsigned long *)(current->thread.trap_tdb + 24); current 79 arch/s390/kernel/traps.c if (!current->ptrace) current 82 arch/s390/kernel/traps.c (void __force __user *) current->thread.per_event.address); current 167 arch/s390/kernel/traps.c if (current->ptrace) current 207 arch/s390/kernel/traps.c vic = (current->thread.fpu.fpc & 0xf00) >> 8; current 233 arch/s390/kernel/traps.c if (current->thread.fpu.fpc & FPC_DXC_MASK) current 234 arch/s390/kernel/traps.c do_fp_trap(regs, current->thread.fpu.fpc); current 39 arch/s390/kernel/uprobes.c regs->psw.addr = current->utask->xol_vaddr; current 40 arch/s390/kernel/uprobes.c set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP); current 41 arch/s390/kernel/uprobes.c update_cr_regs(current); current 71 arch/s390/kernel/uprobes.c regs->psw.addr >= current->thread.per_user.start && current 72 arch/s390/kernel/uprobes.c regs->psw.addr <= current->thread.per_user.end) current 81 arch/s390/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 83 arch/s390/kernel/uprobes.c clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP); current 84 arch/s390/kernel/uprobes.c update_cr_regs(current); current 101 arch/s390/kernel/uprobes.c if (check_per_event(current->thread.per_event.cause, current 102 arch/s390/kernel/uprobes.c current->thread.per_user.control, regs)) { current 104 arch/s390/kernel/uprobes.c current->thread.per_event.address = utask->vaddr; current 139 arch/s390/kernel/uprobes.c regs->psw.addr = current->utask->vaddr; current 140 arch/s390/kernel/uprobes.c current->thread.per_event.address = current->utask->vaddr; current 254 arch/s390/kernel/uprobes.c if (!(current->thread.per_user.control & PER_EVENT_STORE)) current 256 arch/s390/kernel/uprobes.c if ((void *)current->thread.per_user.start > (addr + len)) current 258 arch/s390/kernel/uprobes.c if ((void *)current->thread.per_user.end < addr) current 260 arch/s390/kernel/uprobes.c current->thread.per_event.address = regs->psw.addr; current 261 arch/s390/kernel/uprobes.c current->thread.per_event.cause = PER_EVENT_STORE >> 16; current 87 arch/s390/kernel/vdso.c if (WARN_ON_ONCE(current->mm != vma->vm_mm)) current 90 arch/s390/kernel/vdso.c current->mm->context.vdso_base = vma->vm_start; current 203 arch/s390/kernel/vdso.c struct mm_struct *mm = current->mm; current 257 arch/s390/kernel/vdso.c current->mm->context.vdso_base = vdso_base; current 196 arch/s390/kernel/vtime.c S390_lowcore.user_timer = current->thread.user_timer; current 197 arch/s390/kernel/vtime.c S390_lowcore.guest_timer = current->thread.guest_timer; current 198 arch/s390/kernel/vtime.c S390_lowcore.system_timer = current->thread.system_timer; current 199 arch/s390/kernel/vtime.c S390_lowcore.hardirq_timer = current->thread.hardirq_timer; current 200 arch/s390/kernel/vtime.c S390_lowcore.softirq_timer = current->thread.softirq_timer; current 94 arch/s390/kvm/intercept.c current->pid, vcpu->kvm); current 214 arch/s390/kvm/intercept.c if (current->thread.per_flags & PER_FLAG_NO_TE) current 561 arch/s390/kvm/interrupt.c save_gs_cb(current->thread.gs_cb); current 622 arch/s390/kvm/interrupt.c rc |= put_guest_lc(vcpu, current->thread.fpu.fpc, current 600 arch/s390/kvm/kvm-s390.c if (fatal_signal_pending(current)) current 875 arch/s390/kvm/kvm-s390.c struct gmap *new = gmap_create(current->mm, new_limit); current 1800 arch/s390/kvm/kvm-s390.c if (!mm_uses_skeys(current->mm)) current 1811 arch/s390/kvm/kvm-s390.c down_read(¤t->mm->mmap_sem); current 1820 arch/s390/kvm/kvm-s390.c r = get_guest_storage_key(current->mm, hva, &keys[i]); current 1825 arch/s390/kvm/kvm-s390.c up_read(¤t->mm->mmap_sem); current 1869 arch/s390/kvm/kvm-s390.c down_read(¤t->mm->mmap_sem); current 1885 arch/s390/kvm/kvm-s390.c r = set_guest_storage_key(current->mm, hva, keys[i], 0); current 1887 arch/s390/kvm/kvm-s390.c r = fixup_user_fault(current, current->mm, hva, current 1896 arch/s390/kvm/kvm-s390.c up_read(¤t->mm->mmap_sem); current 2439 arch/s390/kvm/kvm-s390.c sprintf(debug_name, "kvm-%u", current->pid); current 2501 arch/s390/kvm/kvm-s390.c kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); current 2513 arch/s390/kvm/kvm-s390.c KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); current 2578 arch/s390/kvm/kvm-s390.c vcpu->arch.gmap = gmap_create(current->mm, -1UL); current 3685 arch/s390/kvm/kvm-s390.c hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); current 3686 arch/s390/kvm/kvm-s390.c hva += current->thread.gmap_addr & ~PAGE_MASK; current 3690 arch/s390/kvm/kvm-s390.c rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); current 3815 arch/s390/kvm/kvm-s390.c current->thread.gmap_addr; current 3818 arch/s390/kvm/kvm-s390.c } else if (current->thread.gmap_pfault) { current 3820 arch/s390/kvm/kvm-s390.c current->thread.gmap_pfault = 0; current 3823 arch/s390/kvm/kvm-s390.c return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1); current 3861 arch/s390/kvm/kvm-s390.c } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); current 3930 arch/s390/kvm/kvm-s390.c vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; current 3931 arch/s390/kvm/kvm-s390.c vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; current 3933 arch/s390/kvm/kvm-s390.c current->thread.fpu.regs = vcpu->run->s.regs.vrs; current 3935 arch/s390/kvm/kvm-s390.c current->thread.fpu.regs = vcpu->run->s.regs.fprs; current 3936 arch/s390/kvm/kvm-s390.c current->thread.fpu.fpc = vcpu->run->s.regs.fpc; current 3937 arch/s390/kvm/kvm-s390.c if (test_fp_ctl(current->thread.fpu.fpc)) current 3939 arch/s390/kvm/kvm-s390.c current->thread.fpu.fpc = 0; current 3943 arch/s390/kvm/kvm-s390.c if (current->thread.gs_cb) { current 3944 arch/s390/kvm/kvm-s390.c vcpu->arch.host_gscb = current->thread.gs_cb; current 3948 arch/s390/kvm/kvm-s390.c current->thread.gs_cb = (struct gs_cb *) current 3950 arch/s390/kvm/kvm-s390.c restore_gs_cb(current->thread.gs_cb); current 3978 arch/s390/kvm/kvm-s390.c vcpu->run->s.regs.fpc = current->thread.fpu.fpc; current 3980 arch/s390/kvm/kvm-s390.c current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; current 3981 arch/s390/kvm/kvm-s390.c current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; current 3985 arch/s390/kvm/kvm-s390.c save_gs_cb(current->thread.gs_cb); current 3987 arch/s390/kvm/kvm-s390.c current->thread.gs_cb = vcpu->arch.host_gscb; current 4033 arch/s390/kvm/kvm-s390.c if (signal_pending(current) && !rc) { current 4125 arch/s390/kvm/kvm-s390.c vcpu->run->s.regs.fpc = current->thread.fpu.fpc; current 64 arch/s390/kvm/priv.c current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb; current 65 arch/s390/kvm/priv.c restore_gs_cb(current->thread.gs_cb); current 273 arch/s390/kvm/priv.c down_read(¤t->mm->mmap_sem); current 274 arch/s390/kvm/priv.c rc = get_guest_storage_key(current->mm, vmaddr, &key); current 277 arch/s390/kvm/priv.c rc = fixup_user_fault(current, current->mm, vmaddr, current 280 arch/s390/kvm/priv.c up_read(¤t->mm->mmap_sem); current 284 arch/s390/kvm/priv.c up_read(¤t->mm->mmap_sem); current 320 arch/s390/kvm/priv.c down_read(¤t->mm->mmap_sem); current 321 arch/s390/kvm/priv.c rc = reset_guest_reference_bit(current->mm, vmaddr); current 323 arch/s390/kvm/priv.c rc = fixup_user_fault(current, current->mm, vmaddr, current 326 arch/s390/kvm/priv.c up_read(¤t->mm->mmap_sem); current 330 arch/s390/kvm/priv.c up_read(¤t->mm->mmap_sem); current 388 arch/s390/kvm/priv.c down_read(¤t->mm->mmap_sem); current 389 arch/s390/kvm/priv.c rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey, current 394 arch/s390/kvm/priv.c rc = fixup_user_fault(current, current->mm, vmaddr, current 398 arch/s390/kvm/priv.c up_read(¤t->mm->mmap_sem); current 1089 arch/s390/kvm/priv.c down_read(¤t->mm->mmap_sem); current 1090 arch/s390/kvm/priv.c rc = cond_set_guest_storage_key(current->mm, vmaddr, current 1093 arch/s390/kvm/priv.c rc = fixup_user_fault(current, current->mm, vmaddr, current 1097 arch/s390/kvm/priv.c up_read(¤t->mm->mmap_sem); current 909 arch/s390/kvm/vsie.c if (current->thread.gmap_int_code == PGM_PROTECTION) current 912 arch/s390/kvm/vsie.c current->thread.gmap_addr, 1); current 915 arch/s390/kvm/vsie.c current->thread.gmap_addr); current 918 arch/s390/kvm/vsie.c current->thread.gmap_addr, current 919 arch/s390/kvm/vsie.c current->thread.gmap_write_flag); current 921 arch/s390/kvm/vsie.c vsie_page->fault_addr = current->thread.gmap_addr; current 1184 arch/s390/kvm/vsie.c if (rc || scb_s->icptcode || signal_pending(current) || current 1304 arch/s390/kvm/vsie.c if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) || current 45 arch/s390/lib/uaccess.c current->thread.mm_segment = fs; current 69 arch/s390/lib/uaccess.c old_fs = current->thread.mm_segment; current 74 arch/s390/lib/uaccess.c current->thread.mm_segment |= 1; current 96 arch/s390/lib/uaccess.c current->thread.mm_segment = old_fs; current 83 arch/s390/mm/fault.c if (current->thread.mm_segment == USER_DS) current 89 arch/s390/mm/fault.c if (current->thread.mm_segment & 1) { current 90 arch/s390/mm/fault.c if (current->thread.mm_segment == USER_DS_SACF) current 214 arch/s390/mm/fault.c if ((task_pid_nr(current) > 1) && !show_unhandled_signals) current 216 arch/s390/mm/fault.c if (!unhandled_signal(current, signr)) current 397 arch/s390/mm/fault.c tsk = current; current 442 arch/s390/mm/fault.c current->thread.gmap_addr = address; current 443 arch/s390/mm/fault.c current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE); current 444 arch/s390/mm/fault.c current->thread.gmap_int_code = regs->int_code & 0xffff; current 484 arch/s390/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { current 513 arch/s390/mm/fault.c current->thread.gmap_pfault = 1; current 527 arch/s390/mm/fault.c address = __gmap_link(gmap, current->thread.gmap_addr, current 745 arch/s390/mm/fault.c if (WARN_ON_ONCE(tsk != current)) current 652 arch/s390/mm/gmap.c if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags, current 882 arch/s390/mm/gmap.c if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked)) current 2538 arch/s390/mm/gmap.c struct mm_struct *mm = current->mm; current 2600 arch/s390/mm/gmap.c struct mm_struct *mm = current->mm; current 286 arch/s390/mm/hugetlbpage.c info.low_limit = current->mm->mmap_base; current 304 arch/s390/mm/hugetlbpage.c info.high_limit = current->mm->mmap_base; current 330 arch/s390/mm/hugetlbpage.c struct mm_struct *mm = current->mm; current 363 arch/s390/mm/hugetlbpage.c if (addr + len > current->mm->context.asce_limit && current 25 arch/s390/mm/mmap.c if (!(current->flags & PF_RANDOMIZE)) current 32 arch/s390/mm/mmap.c if (current->personality & ADDR_COMPAT_LAYOUT) current 79 arch/s390/mm/mmap.c struct mm_struct *mm = current->mm; current 112 arch/s390/mm/mmap.c if (addr + len > current->mm->context.asce_limit && current 128 arch/s390/mm/mmap.c struct mm_struct *mm = current->mm; current 177 arch/s390/mm/mmap.c if (addr + len > current->mm->context.asce_limit && current 195 arch/s390/mm/mmap.c if (current->flags & PF_RANDOMIZE) current 74 arch/s390/mm/pgalloc.c if (current->active_mm == mm) { current 76 arch/s390/mm/pgalloc.c if (current->thread.mm_segment == USER_DS) { current 81 arch/s390/mm/pgalloc.c if (current->thread.mm_segment == USER_DS_SACF) { current 138 arch/s390/mm/pgalloc.c if (current->active_mm == mm) { current 151 arch/s390/mm/pgalloc.c if (current->active_mm == mm) current 835 arch/s390/mm/pgtable.c rc = get_guest_storage_key(current->mm, addr, &tmp); current 847 arch/s390/mm/pgtable.c rc = set_guest_storage_key(current->mm, addr, key, nq); current 22 arch/s390/oprofile/init.c unwind_for_each_frame(&state, current, regs, 0) { current 128 arch/s390/pci/pci_mmio.c down_read(¤t->mm->mmap_sem); current 130 arch/s390/pci/pci_mmio.c vma = find_vma(current->mm, user_addr); current 138 arch/s390/pci/pci_mmio.c up_read(¤t->mm->mmap_sem); current 188 arch/sh/include/asm/elf.h set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK))) current 200 arch/sh/include/asm/elf.h #define VDSO_BASE ((unsigned long)current->mm->context.vdso) current 25 arch/sh/include/asm/stackprotector.h current->stack_canary = canary; current 26 arch/sh/include/asm/stackprotector.h __stack_chk_guard = current->stack_canary; current 14 arch/sh/kernel/cpu/fpu.c if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current) current 46 arch/sh/kernel/cpu/fpu.c struct task_struct *tsk = current; current 56 arch/sh/kernel/cpu/fpu.c struct task_struct *tsk = current; current 454 arch/sh/kernel/cpu/sh2a/fpu.c struct task_struct *tsk = current; current 466 arch/sh/kernel/cpu/sh2a/fpu.c struct task_struct *tsk = current; current 509 arch/sh/kernel/cpu/sh2a/fpu.c struct task_struct *tsk = current; current 558 arch/sh/kernel/cpu/sh2a/fpu.c struct task_struct *tsk = current; current 228 arch/sh/kernel/cpu/sh4/fpu.c struct task_struct *tsk = current; current 241 arch/sh/kernel/cpu/sh4/fpu.c struct task_struct *tsk = current; current 279 arch/sh/kernel/cpu/sh4/fpu.c struct task_struct *tsk = current; current 323 arch/sh/kernel/cpu/sh4/fpu.c struct task_struct *tsk = current; current 363 arch/sh/kernel/cpu/sh4/fpu.c struct task_struct *tsk = current; current 396 arch/sh/kernel/cpu/sh4/fpu.c struct task_struct *tsk = current; current 403 arch/sh/kernel/cpu/sh4/fpu.c struct task_struct *tsk = current; current 142 arch/sh/kernel/dumpstack.c tsk = current; current 152 arch/sh/kernel/dumpstack.c tsk = current; current 153 arch/sh/kernel/dumpstack.c if (tsk == current) current 610 arch/sh/kernel/dwarf.c ret_stack = ftrace_graph_get_ret_stack(current, 0); current 619 arch/sh/kernel/dwarf.c WARN_ON(ftrace_graph_get_ret_stack(current, 1)); current 330 arch/sh/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 312 arch/sh/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags); current 328 arch/sh/kernel/kprobes.c if (ri->task != current) current 353 arch/sh/kernel/kprobes.c kretprobe_hash_unlock(current, &flags); current 74 arch/sh/kernel/process_32.c free_thread_xstate(current); current 80 arch/sh/kernel/process_32.c struct task_struct *tsk = current; current 102 arch/sh/kernel/process_32.c struct task_struct *tsk = current; current 125 arch/sh/kernel/process_32.c struct task_struct *tsk = current; current 212 arch/sh/kernel/process_32.c if (!p || p == current || p->state == TASK_RUNNING) current 321 arch/sh/kernel/process_64.c if (last_task_used_math == current) { current 331 arch/sh/kernel/process_64.c if(current->thread.kregs==&fake_swapper_regs) { current 332 arch/sh/kernel/process_64.c current->thread.kregs = current 333 arch/sh/kernel/process_64.c ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1); current 334 arch/sh/kernel/process_64.c current->thread.uregs = current->thread.kregs; current 348 arch/sh/kernel/process_64.c struct task_struct *tsk = current; current 352 arch/sh/kernel/process_64.c if (current == last_task_used_math) { current 380 arch/sh/kernel/process_64.c if (last_task_used_math == current) { current 382 arch/sh/kernel/process_64.c save_fpu(current); current 436 arch/sh/kernel/process_64.c if (!p || p == current || p->state == TASK_RUNNING) current 80 arch/sh/kernel/signal_32.c struct task_struct *tsk = current; current 93 arch/sh/kernel/signal_32.c struct task_struct *tsk = current; current 137 arch/sh/kernel/signal_32.c struct task_struct *tsk = current; current 161 arch/sh/kernel/signal_32.c current->restart_block.fn = do_no_restart_syscall; current 191 arch/sh/kernel/signal_32.c current->restart_block.fn = do_no_restart_syscall; current 256 arch/sh/kernel/signal_32.c sp = current->sas_ss_sp + current->sas_ss_size; current 289 arch/sh/kernel/signal_32.c } else if (likely(current->mm->context.vdso)) { current 315 arch/sh/kernel/signal_32.c if (current->personality & FDPIC_FUNCPTRS) { current 328 arch/sh/kernel/signal_32.c current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); current 359 arch/sh/kernel/signal_32.c } else if (likely(current->mm->context.vdso)) { current 385 arch/sh/kernel/signal_32.c if (current->personality & FDPIC_FUNCPTRS) { current 398 arch/sh/kernel/signal_32.c current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); current 152 arch/sh/kernel/signal_64.c if (current == last_task_used_math) { current 157 arch/sh/kernel/signal_64.c err |= __copy_from_user(¤t->thread.xstate->hardfpu, &sc->sc_fpregs[0], current 174 arch/sh/kernel/signal_64.c if (current == last_task_used_math) { current 176 arch/sh/kernel/signal_64.c save_fpu(current); current 182 arch/sh/kernel/signal_64.c err |= __copy_to_user(&sc->sc_fpregs[0], ¤t->thread.xstate->hardfpu, current 260 arch/sh/kernel/signal_64.c current->restart_block.fn = do_no_restart_syscall; current 294 arch/sh/kernel/signal_64.c current->restart_block.fn = do_no_restart_syscall; current 366 arch/sh/kernel/signal_64.c sp = current->sas_ss_sp + current->sas_ss_size; current 453 arch/sh/kernel/signal_64.c sig, current->comm, current->pid, frame, current 533 arch/sh/kernel/signal_64.c sig, current->comm, current->pid, frame, current 60 arch/sh/kernel/smp.c init_new_context(current, &init_mm); current 181 arch/sh/kernel/smp.c current->active_mm = mm; current 183 arch/sh/kernel/smp.c enter_lazy_tlb(mm, current); current 364 arch/sh/kernel/smp.c if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { current 396 arch/sh/kernel/smp.c if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { current 440 arch/sh/kernel/smp.c (current->mm != vma->vm_mm)) { current 51 arch/sh/kernel/stacktrace.c unwind_stack(current, NULL, sp, &save_stack_ops, trace); current 84 arch/sh/kernel/stacktrace.c unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace); current 29 arch/sh/kernel/swsusp.c init_fpu(current); current 72 arch/sh/kernel/sys_sh.c down_read(¤t->mm->mmap_sem); current 73 arch/sh/kernel/sys_sh.c vma = find_vma (current->mm, addr); current 75 arch/sh/kernel/sys_sh.c up_read(¤t->mm->mmap_sem); current 94 arch/sh/kernel/sys_sh.c up_read(¤t->mm->mmap_sem); current 37 arch/sh/kernel/traps.c printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm, current 38 arch/sh/kernel/traps.c task_pid_nr(current), task_stack_page(current) + 1); current 42 arch/sh/kernel/traps.c (unsigned long)task_stack_page(current)); current 51 arch/sh/kernel/traps.c if (kexec_should_crash(current)) current 326 arch/sh/kernel/traps_32.c unaligned_fixups_notify(current, instruction, regs); current 503 arch/sh/kernel/traps_32.c unaligned_fixups_notify(current, instruction, regs); current 533 arch/sh/kernel/traps_32.c "access (PC %lx PR %lx)\n", current->comm, regs->pc, current 553 arch/sh/kernel/traps_32.c unaligned_fixups_notify(current, instruction, regs); current 635 arch/sh/kernel/traps_32.c current->thread.dsp_status.status |= SR_DSP; current 130 arch/sh/kernel/traps_64.c unaligned_fixups_notify(current, opcode, regs); current 340 arch/sh/kernel/traps_64.c if (last_task_used_math == current) { current 342 arch/sh/kernel/traps_64.c save_fpu(current); current 353 arch/sh/kernel/traps_64.c current->thread.xstate->hardfpu.fp_regs[destreg] = buflo; current 357 arch/sh/kernel/traps_64.c current->thread.xstate->hardfpu.fp_regs[destreg] = buflo; current 358 arch/sh/kernel/traps_64.c current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi; current 361 arch/sh/kernel/traps_64.c current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi; current 362 arch/sh/kernel/traps_64.c current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo; current 364 arch/sh/kernel/traps_64.c current->thread.xstate->hardfpu.fp_regs[destreg] = buflo; current 365 arch/sh/kernel/traps_64.c current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi; current 410 arch/sh/kernel/traps_64.c if (last_task_used_math == current) { current 412 arch/sh/kernel/traps_64.c save_fpu(current); current 420 arch/sh/kernel/traps_64.c buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; current 424 arch/sh/kernel/traps_64.c buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; current 425 arch/sh/kernel/traps_64.c bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; current 428 arch/sh/kernel/traps_64.c bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg]; current 429 arch/sh/kernel/traps_64.c buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; current 431 arch/sh/kernel/traps_64.c buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; current 432 arch/sh/kernel/traps_64.c bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; current 60 arch/sh/kernel/vsyscall/vsyscall.c struct mm_struct *mm = current->mm; current 80 arch/sh/kernel/vsyscall/vsyscall.c current->mm->context.vdso = (void *)addr; current 552 arch/sh/math-emu/math.c struct task_struct *tsk = current; current 597 arch/sh/math-emu/math.c struct task_struct *tsk = current; current 72 arch/sh/mm/alignment.c if (current->thread.flags & SH_THREAD_UAC_SIGBUS) { current 77 arch/sh/mm/alignment.c if (current->thread.flags & SH_THREAD_UAC_NOPRINT) current 235 arch/sh/mm/cache-sh4.c if ((vma->vm_mm == current->active_mm)) current 258 arch/sh/mm/fault.c struct mm_struct *mm = current->mm; current 285 arch/sh/mm/fault.c struct task_struct *tsk = current; current 305 arch/sh/mm/fault.c if (fatal_signal_pending(current)) { current 307 arch/sh/mm/fault.c up_read(¤t->mm->mmap_sem); current 319 arch/sh/mm/fault.c up_read(¤t->mm->mmap_sem); current 323 arch/sh/mm/fault.c up_read(¤t->mm->mmap_sem); current 385 arch/sh/mm/fault.c tsk = current; current 37 arch/sh/mm/mmap.c struct mm_struct *mm = current->mm; current 86 arch/sh/mm/mmap.c struct mm_struct *mm = current->mm; current 25 arch/sh/mm/tlb-pteaex.c if (vma && current->active_mm != vma->vm_mm) current 35 arch/sh/mm/tlb-sh3.c if (vma && current->active_mm != vma->vm_mm) current 23 arch/sh/mm/tlb-sh4.c if (vma && current->active_mm != vma->vm_mm) current 39 arch/sh/mm/tlbex_32.c if (unlikely(address >= TASK_SIZE || !current->mm)) current 42 arch/sh/mm/tlbex_32.c pgd = pgd_offset(current->mm, address); current 55 arch/sh/mm/tlbex_64.c if (unlikely(address >= TASK_SIZE || !current->mm)) current 58 arch/sh/mm/tlbex_64.c pgd = pgd_offset(current->mm, address); current 28 arch/sh/mm/tlbflush_32.c if (vma->vm_mm != current->mm) { current 53 arch/sh/mm/tlbflush_32.c if (mm == current->mm) current 63 arch/sh/mm/tlbflush_32.c if (mm != current->mm) { current 117 arch/sh/mm/tlbflush_32.c if (mm == current->mm) current 139 arch/sh/mm/tlbflush_64.c if (mm == current->mm) current 22 arch/sparc/include/asm/cacheflush_64.h do { if ((__mm) == current->mm) flushw_user(); } while(0) current 18 arch/sparc/include/asm/current.h register struct task_struct *current asm("g4"); current 210 arch/sparc/include/asm/elf_64.h if (personality(current->personality) != PER_LINUX32) \ current 212 arch/sparc/include/asm/elf_64.h (current->personality & (~PER_MASK))); \ current 222 arch/sparc/include/asm/elf_64.h (unsigned long)current->mm->context.vdso); \ current 22 arch/sparc/include/asm/mman.h if (current->mm == mm) { current 25 arch/sparc/include/asm/mman.h regs = task_pt_regs(current); current 36 arch/sparc/include/asm/mman.h if (!current->mm->context.adi) { current 37 arch/sparc/include/asm/mman.h regs = task_pt_regs(current); current 39 arch/sparc/include/asm/mman.h current->mm->context.adi = true; current 40 arch/sparc/include/asm/mman.h on_each_cpu_mask(mm_cpumask(current->mm), current 41 arch/sparc/include/asm/mman.h ipi_set_tstate_mcde, current->mm, 0); current 67 arch/sparc/include/asm/mman.h vma = find_vma(current->mm, addr); current 181 arch/sparc/include/asm/mmu_context_64.h if (current && current->mm && current->mm->context.adi) { current 184 arch/sparc/include/asm/mmu_context_64.h regs = task_pt_regs(current); current 976 arch/sparc/include/asm/pgtable_64.h flush_dcache_page_all(current->mm, \ current 185 arch/sparc/include/asm/sfp-machine_32.h #define FP_ROUNDMODE ((current->thread.fsr >> 30) & 0x3) current 201 arch/sparc/include/asm/sfp-machine_32.h #define FP_INHIBIT_RESULTS ((current->thread.fsr >> 23) & _fex) current 207 arch/sparc/include/asm/sfp-machine_32.h #define FP_TRAPPING_EXCEPTIONS ((current->thread.fsr >> 23) & 0x1f) current 57 arch/sparc/include/asm/switch_to_64.h : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \ current 28 arch/sparc/include/asm/uaccess_32.h #define get_fs() (current->thread.current_ds) current 29 arch/sparc/include/asm/uaccess_32.h #define set_fs(val) ((current->thread.current_ds) = (val)) current 130 arch/sparc/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 475 arch/sparc/kernel/kprobes.c kretprobe_hash_lock(current, &head, &flags); current 491 arch/sparc/kernel/kprobes.c if (ri->task != current) current 514 arch/sparc/kernel/kprobes.c kretprobe_hash_unlock(current, &flags); current 98 arch/sparc/kernel/leon_smp.c current->active_mm = &init_mm; current 1775 arch/sparc/kernel/perf_event.c ret_stack = ftrace_graph_get_ret_stack(current, current 1861 arch/sparc/kernel/perf_event.c if (!current->mm) current 159 arch/sparc/kernel/process_32.c tsk = current; current 161 arch/sparc/kernel/process_32.c if (tsk == current && !_ksp) current 207 arch/sparc/kernel/process_32.c if(last_task_used_math == current) { current 213 arch/sparc/kernel/process_32.c fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, current 214 arch/sparc/kernel/process_32.c ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); current 223 arch/sparc/kernel/process_32.c if (current->thread.flags & SPARC_FLAG_KTHREAD) { current 224 arch/sparc/kernel/process_32.c current->thread.flags &= ~SPARC_FLAG_KTHREAD; current 228 arch/sparc/kernel/process_32.c current->thread.kregs = (struct pt_regs *) current 229 arch/sparc/kernel/process_32.c (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ)); current 312 arch/sparc/kernel/process_32.c if(last_task_used_math == current) { current 360 arch/sparc/kernel/process_32.c ti->kpsr = current->thread.fork_kpsr | PSR_PIL; current 361 arch/sparc/kernel/process_32.c ti->kwim = current->thread.fork_kwim; current 400 arch/sparc/kernel/process_32.c childregs->u_regs[UREG_I0] = current->pid; current 425 arch/sparc/kernel/process_32.c fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, current 426 arch/sparc/kernel/process_32.c ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); current 433 arch/sparc/kernel/process_32.c if (current == last_task_used_math) { current 435 arch/sparc/kernel/process_32.c fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, current 436 arch/sparc/kernel/process_32.c ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); current 444 arch/sparc/kernel/process_32.c ¤t->thread.float_regs[0], current 446 arch/sparc/kernel/process_32.c fpregs->pr_fsr = current->thread.fsr; current 447 arch/sparc/kernel/process_32.c fpregs->pr_qcnt = current->thread.fpqdepth; current 452 arch/sparc/kernel/process_32.c ¤t->thread.fpqueue[0], current 469 arch/sparc/kernel/process_32.c if (!task || task == current || current 198 arch/sparc/kernel/process_64.c show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); current 560 arch/sparc/kernel/process_64.c current->comm, current->pid, current 667 arch/sparc/kernel/process_64.c t->kregs->u_regs[UREG_I0] = current->pid; current 778 arch/sparc/kernel/process_64.c if (!task || task == current || current 60 arch/sparc/kernel/ptrace_32.c if (target == current) current 138 arch/sparc/kernel/ptrace_32.c if (target == current) current 220 arch/sparc/kernel/ptrace_32.c if (target == current) current 264 arch/sparc/kernel/ptrace_32.c if (target == current) current 344 arch/sparc/kernel/ptrace_32.c unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4]; current 351 arch/sparc/kernel/ptrace_32.c view = task_user_regset_view(current); current 165 arch/sparc/kernel/ptrace_64.c if (target == current) { current 180 arch/sparc/kernel/ptrace_64.c if (target == current) { current 256 arch/sparc/kernel/ptrace_64.c if (target == current) current 306 arch/sparc/kernel/ptrace_64.c if (target == current) current 381 arch/sparc/kernel/ptrace_64.c if (target == current) current 448 arch/sparc/kernel/ptrace_64.c if (target == current) current 527 arch/sparc/kernel/ptrace_64.c if (target == current) current 539 arch/sparc/kernel/ptrace_64.c if (target == current) { current 565 arch/sparc/kernel/ptrace_64.c if (target == current) { current 639 arch/sparc/kernel/ptrace_64.c if (target == current) current 651 arch/sparc/kernel/ptrace_64.c if (target == current) { current 679 arch/sparc/kernel/ptrace_64.c if (target == current) { current 761 arch/sparc/kernel/ptrace_64.c if (target == current) current 813 arch/sparc/kernel/ptrace_64.c if (target == current) current 913 arch/sparc/kernel/ptrace_64.c const struct user_regset_view *view = task_user_regset_view(current); current 914 arch/sparc/kernel/ptrace_64.c compat_ulong_t caddr2 = task_pt_regs(current)->u_regs[UREG_I4]; current 1024 arch/sparc/kernel/ptrace_64.c const struct user_regset_view *view = task_user_regset_view(current); current 1025 arch/sparc/kernel/ptrace_64.c unsigned long addr2 = task_pt_regs(current)->u_regs[UREG_I4]; current 88 arch/sparc/kernel/setup_32.c if (!is_idle_task(current)) { current 97 arch/sparc/kernel/signal32.c current->restart_block.fn = do_no_restart_syscall; current 186 arch/sparc/kernel/signal32.c current->restart_block.fn = do_no_restart_syscall; current 318 arch/sparc/kernel/signal32.c pgdp = pgd_offset(current->mm, address); current 376 arch/sparc/kernel/signal32.c current->comm, current->pid, (unsigned long)sf, current 510 arch/sparc/kernel/signal32.c current->comm, current->pid, (unsigned long)sf, current 713 arch/sparc/kernel/signal32.c if (put_user(current->sas_ss_sp + current->sas_ss_size, current 730 arch/sparc/kernel/signal32.c if (current->sas_ss_sp && on_sig_stack(sp)) current 737 arch/sparc/kernel/signal32.c current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current 738 arch/sparc/kernel/signal32.c current->sas_ss_size = SIGSTKSZ; current 86 arch/sparc/kernel/signal_32.c current->restart_block.fn = do_no_restart_syscall; current 317 arch/sparc/kernel/signal_32.c flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); current 414 arch/sparc/kernel/signal_32.c flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); current 542 arch/sparc/kernel/signal_32.c if (put_user(current->sas_ss_sp + current->sas_ss_size, current 557 arch/sparc/kernel/signal_32.c if (current->sas_ss_sp && on_sig_stack(sp)) current 563 arch/sparc/kernel/signal_32.c current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ; current 564 arch/sparc/kernel/signal_32.c current->sas_ss_size = SIGSTKSZ; current 175 arch/sparc/kernel/signal_64.c err |= __put_user(current->blocked.sig[0], current 178 arch/sparc/kernel/signal_64.c err |= __copy_to_user(&ucp->uc_sigmask, ¤t->blocked, current 266 arch/sparc/kernel/signal_64.c current->restart_block.fn = do_no_restart_syscall; current 375 arch/sparc/kernel/signal_64.c current->comm, current->pid, (unsigned long)sf, current 19 arch/sparc/kernel/sigutil_32.c if (test_tsk_thread_flag(current, TIF_USEDFPU)) { current 21 arch/sparc/kernel/sigutil_32.c fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, current 22 arch/sparc/kernel/sigutil_32.c ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); current 24 arch/sparc/kernel/sigutil_32.c clear_tsk_thread_flag(current, TIF_USEDFPU); current 27 arch/sparc/kernel/sigutil_32.c if (current == last_task_used_math) { current 29 arch/sparc/kernel/sigutil_32.c fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, current 30 arch/sparc/kernel/sigutil_32.c ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); current 36 arch/sparc/kernel/sigutil_32.c ¤t->thread.float_regs[0], current 38 arch/sparc/kernel/sigutil_32.c err |= __put_user(current->thread.fsr, &fpu->si_fsr); current 39 arch/sparc/kernel/sigutil_32.c err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); current 40 arch/sparc/kernel/sigutil_32.c if (current->thread.fpqdepth != 0) current 42 arch/sparc/kernel/sigutil_32.c ¤t->thread.fpqueue[0], current 57 arch/sparc/kernel/sigutil_32.c if (test_tsk_thread_flag(current, TIF_USEDFPU)) current 60 arch/sparc/kernel/sigutil_32.c if (current == last_task_used_math) { current 66 arch/sparc/kernel/sigutil_32.c clear_tsk_thread_flag(current, TIF_USEDFPU); current 71 arch/sparc/kernel/sigutil_32.c err = __copy_from_user(¤t->thread.float_regs[0], &fpu->si_float_regs[0], current 73 arch/sparc/kernel/sigutil_32.c err |= __get_user(current->thread.fsr, &fpu->si_fsr); current 74 arch/sparc/kernel/sigutil_32.c err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); current 75 arch/sparc/kernel/sigutil_32.c if (current->thread.fpqdepth != 0) current 76 arch/sparc/kernel/sigutil_32.c err |= __copy_from_user(¤t->thread.fpqueue[0], current 131 arch/sparc/kernel/smp_64.c current->active_mm = &init_mm; current 1123 arch/sparc/kernel/smp_64.c if (mm == current->mm && atomic_read(&mm->mm_users) == 1) current 1139 arch/sparc/kernel/smp_64.c if (mm == current->mm && atomic_read(&mm->mm_users) == 1) current 98 arch/sparc/kernel/sun4d_smp.c current->active_mm = &init_mm; current 64 arch/sparc/kernel/sun4m_smp.c current->active_mm = &init_mm; current 138 arch/sparc/kernel/sys_sparc_32.c current->comm, task_pid_nr(current), (int)regs->u_regs[1]); current 92 arch/sparc/kernel/sys_sparc_64.c struct mm_struct *mm = current->mm; current 153 arch/sparc/kernel/sys_sparc_64.c struct mm_struct *mm = current->mm; current 224 arch/sparc/kernel/sys_sparc_64.c get_area = current->mm->get_unmapped_area; current 270 arch/sparc/kernel/sys_sparc_64.c if (current->flags & PF_RANDOMIZE) { current 291 arch/sparc/kernel/sys_sparc_64.c (current->personality & ADDR_COMPAT_LAYOUT) || current 425 arch/sparc/kernel/sys_sparc_64.c if (personality(current->personality) == PER_LINUX32 && current 584 arch/sparc/kernel/sys_sparc_64.c current->pid, current->comm); current 62 arch/sparc/kernel/traps_32.c printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); current 119 arch/sparc/kernel/traps_32.c send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)pc, 0, current); current 127 arch/sparc/kernel/traps_32.c send_sig_fault(SIGILL, ILL_PRVOPC, (void __user *)pc, 0, current); current 148 arch/sparc/kernel/traps_32.c 0, current); current 168 arch/sparc/kernel/traps_32.c if(last_task_used_math == current) current 176 arch/sparc/kernel/traps_32.c last_task_used_math = current; current 178 arch/sparc/kernel/traps_32.c fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); current 189 arch/sparc/kernel/traps_32.c fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); current 210 arch/sparc/kernel/traps_32.c struct task_struct *fpt = current; current 257 arch/sparc/kernel/traps_32.c fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); current 308 arch/sparc/kernel/traps_32.c send_sig_fault(SIGEMT, EMT_TAGOVF, (void __user *)pc, 0, current); current 336 arch/sparc/kernel/traps_32.c send_sig_fault(SIGILL, ILL_COPROC, (void __user *)pc, 0, current); current 346 arch/sparc/kernel/traps_32.c send_sig_fault(SIGILL, ILL_COPROC, (void __user *)pc, 0, current); current 352 arch/sparc/kernel/traps_32.c send_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)pc, 0, current); current 391 arch/sparc/kernel/traps_32.c current->active_mm = &init_mm; current 2466 arch/sparc/kernel/traps_64.c tsk = current; current 2469 arch/sparc/kernel/traps_64.c if (tsk == current) current 2537 arch/sparc/kernel/traps_64.c printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); current 2934 arch/sparc/kernel/traps_64.c current->active_mm = &init_mm; current 60 arch/sparc/kernel/unaligned_32.c die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs); current 227 arch/sparc/kernel/unaligned_32.c (current->mm ? current->mm->context : current 228 arch/sparc/kernel/unaligned_32.c current->active_mm->context)); current 230 arch/sparc/kernel/unaligned_32.c (current->mm ? (unsigned long) current->mm->pgd : current 231 arch/sparc/kernel/unaligned_32.c (unsigned long) current->active_mm->pgd)); current 315 arch/sparc/kernel/unaligned_32.c 0, current); current 322 arch/sparc/kernel/unaligned_32.c if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) || current 279 arch/sparc/kernel/unaligned_64.c (current->mm ? CTX_HWBITS(current->mm->context) : current 280 arch/sparc/kernel/unaligned_64.c CTX_HWBITS(current->active_mm->context))); current 282 arch/sparc/kernel/unaligned_64.c (current->mm ? (unsigned long) current->mm->pgd : current 283 arch/sparc/kernel/unaligned_64.c (unsigned long) current->active_mm->pgd)); current 192 arch/sparc/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 193 arch/sparc/kernel/uprobes.c struct arch_uprobe_task *autask = ¤t->utask->autask; current 218 arch/sparc/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 295 arch/sparc/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 100 arch/sparc/mm/fault_32.c unhandled_fault(address, current, ®s); current 132 arch/sparc/mm/fault_32.c addr, current); current 164 arch/sparc/mm/fault_32.c struct task_struct *tsk = current; current 240 arch/sparc/mm/fault_32.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 255 arch/sparc/mm/fault_32.c current->maj_flt++; current 259 arch/sparc/mm/fault_32.c current->min_flt++; current 381 arch/sparc/mm/fault_32.c struct task_struct *tsk = current; current 70 arch/sparc/mm/fault_64.c unhandled_fault(regs->tpc, current, regs); current 82 arch/sparc/mm/fault_64.c pgd_t *pgdp = pgd_offset(current->mm, tpc); current 174 arch/sparc/mm/fault_64.c show_signal_msg(regs, sig, code, addr, current); current 246 arch/sparc/mm/fault_64.c unhandled_fault (address, current, regs); current 256 arch/sparc/mm/fault_64.c current->comm, current->pid, current 264 arch/sparc/mm/fault_64.c struct mm_struct *mm = current->mm; current 424 arch/sparc/mm/fault_64.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 439 arch/sparc/mm/fault_64.c current->maj_flt++; current 443 arch/sparc/mm/fault_64.c current->min_flt++; current 65 arch/sparc/mm/hugetlbpage.c struct mm_struct *mm = current->mm; current 102 arch/sparc/mm/hugetlbpage.c struct mm_struct *mm = current->mm; current 2976 arch/sparc/mm/init_64.c if (mm == current->mm) current 2982 arch/sparc/mm/init_64.c struct mm_struct *mm = current->mm; current 1693 arch/sparc/mm/srmmu.c if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) current 268 arch/sparc/mm/tsb.c current->comm, current->pid, tsb_bytes); current 37 arch/sparc/power/hibernate.c struct mm_struct *mm = current->active_mm; current 364 arch/sparc/vdso/vma.c struct mm_struct *mm = current->mm; current 375 arch/sparc/vdso/vma.c if (current->flags & PF_RANDOMIZE) { current 393 arch/sparc/vdso/vma.c current->mm->context.vdso = (void __user *)text_start; current 423 arch/sparc/vdso/vma.c current->mm->context.vdso = NULL; current 126 arch/um/drivers/mconsole_kern.c struct vfsmount *mnt = task_active_pid_ns(current)->proc_mnt; current 27 arch/um/drivers/mconsole_kern.h #define CONFIG_CHUNK(str, size, current, chunk, end) \ current 29 arch/um/drivers/mconsole_kern.h current += strlen(chunk); \ current 30 arch/um/drivers/mconsole_kern.h if(current >= size) \ current 37 arch/um/drivers/mconsole_kern.h current++; \ current 70 arch/um/drivers/random.c DECLARE_WAITQUEUE(wait, current); current 92 arch/um/drivers/random.c if (signal_pending (current)) current 21 arch/um/include/asm/stacktrace.h if (!task || task == current) current 36 arch/um/include/asm/stacktrace.h if (!task || task == current) current 27 arch/um/kernel/exec.c arch_flush_thread(¤t->thread.arch); current 29 arch/um/kernel/exec.c ret = unmap(¤t->mm->context.id, 0, STUB_START, 0, &data); current 30 arch/um/kernel/exec.c ret = ret || unmap(¤t->mm->context.id, STUB_END, current 40 arch/um/kernel/exec.c __switch_mm(¤t->mm->context.id); current 47 arch/um/kernel/exec.c current->ptrace &= ~PT_DTRACE; current 592 arch/um/kernel/irq.c current->stack = to; current 92 arch/um/kernel/process.c arch_switch_to(current); current 94 arch/um/kernel/process.c return current->thread.prev_sched; current 99 arch/um/kernel/process.c struct pt_regs *regs = ¤t->thread.regs; current 111 arch/um/kernel/process.c return task_pid_nr(current); current 123 arch/um/kernel/process.c if (current->thread.prev_sched != NULL) current 124 arch/um/kernel/process.c schedule_tail(current->thread.prev_sched); current 125 arch/um/kernel/process.c current->thread.prev_sched = NULL; current 127 arch/um/kernel/process.c fn = current->thread.request.u.thread.proc; current 128 arch/um/kernel/process.c arg = current->thread.request.u.thread.arg; current 134 arch/um/kernel/process.c userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs); current 142 arch/um/kernel/process.c schedule_tail(current->thread.prev_sched); current 149 arch/um/kernel/process.c arch_switch_to(current); current 151 arch/um/kernel/process.c current->thread.prev_sched = NULL; current 153 arch/um/kernel/process.c userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs); current 160 arch/um/kernel/process.c int kthread = current->flags & PF_KTHREAD; current 174 arch/um/kernel/process.c arch_copy_thread(¤t->thread.arch, &p->thread.arch); current 381 arch/um/kernel/process.c struct task_struct *task = t ? t : current; current 402 arch/um/kernel/process.c if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current 413 arch/um/kernel/process.c if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING)) current 143 arch/um/kernel/ptrace.c int ptraced = current->ptrace; current 56 arch/um/kernel/signal.c if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) current 83 arch/um/kernel/signal.c sp = current->sas_ss_sp + current->sas_ss_size; current 131 arch/um/kernel/signal.c if (current->ptrace & PT_DTRACE) current 132 arch/um/kernel/signal.c current->thread.singlestep_syscall = current 133 arch/um/kernel/signal.c is_syscall(PT_REGS_IP(¤t->thread.regs)); current 63 arch/um/kernel/skas/mmu.c if (current->mm != NULL && current->mm != &init_mm) current 64 arch/um/kernel/skas/mmu.c from_mm = ¤t->mm->context; current 26 arch/um/kernel/skas/process.c cpu_tasks[0].task = current; current 51 arch/um/kernel/skas/process.c if (current->mm == NULL) current 54 arch/um/kernel/skas/process.c return current->mm->context.id.stack; current 43 arch/um/kernel/skas/uaccess.c pte_t *pte = virt_to_pte(current->mm, virt); current 51 arch/um/kernel/skas/uaccess.c pte = virt_to_pte(current->mm, virt); current 67 arch/um/kernel/stacktrace.c __save_stack_trace(current, trace); current 31 arch/um/kernel/sysrq.c struct pt_regs *segv_regs = current->thread.segv_regs; current 55 arch/um/kernel/sysrq.c dump_trace(current, &stackops, NULL); current 329 arch/um/kernel/tlb.c "process: %d\n", task_tgid_vnr(current)); current 331 arch/um/kernel/tlb.c up_write(¤t->mm->mmap_sem); current 333 arch/um/kernel/tlb.c do_signal(¤t->thread.regs); current 528 arch/um/kernel/tlb.c if (atomic_read(¤t->mm->mm_users) == 0) current 531 arch/um/kernel/tlb.c flush_tlb_mm(current->mm); current 589 arch/um/kernel/tlb.c struct mm_struct *mm = current->mm; current 28 arch/um/kernel/trap.c struct mm_struct *mm = current->mm; current 79 arch/um/kernel/trap.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 95 arch/um/kernel/trap.c current->maj_flt++; current 97 arch/um/kernel/trap.c current->min_flt++; current 144 arch/um/kernel/trap.c struct task_struct *tsk = current; current 165 arch/um/kernel/trap.c current->thread.arch.faultinfo = fi; current 172 arch/um/kernel/trap.c do_signal(¤t->thread.regs); current 219 arch/um/kernel/trap.c current->thread.segv_regs = container_of(regs, struct pt_regs, regs); current 225 arch/um/kernel/trap.c else if (current->mm == NULL) { current 248 arch/um/kernel/trap.c catcher = current->thread.fault_catcher; current 252 arch/um/kernel/trap.c current->thread.fault_addr = (void *) address; current 255 arch/um/kernel/trap.c else if (current->thread.fault_addr != NULL) current 269 arch/um/kernel/trap.c current->thread.arch.faultinfo = fi; current 273 arch/um/kernel/trap.c current->thread.arch.faultinfo = fi; current 279 arch/um/kernel/trap.c current->thread.segv_regs = NULL; current 303 arch/um/kernel/trap.c current->thread.arch.faultinfo = *fi; current 314 arch/um/kernel/trap.c if (current->thread.fault_catcher != NULL) current 315 arch/um/kernel/trap.c UML_LONGJMP(current->thread.fault_catcher, 1); current 58 arch/unicore32/kernel/fpu-ucf64.c current->thread.error_code = 0; current 59 arch/unicore32/kernel/fpu-ucf64.c current->thread.trap_no = 6; current 63 arch/unicore32/kernel/fpu-ucf64.c current); current 198 arch/unicore32/kernel/process.c task_pid_nr(current), current->comm); current 206 arch/unicore32/kernel/process.c struct task_struct *tsk = current; current 278 arch/unicore32/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 308 arch/unicore32/kernel/process.c struct mm_struct *mm = current->mm; current 119 arch/unicore32/kernel/ptrace.c if (!(current->ptrace & PT_PTRACED)) current 133 arch/unicore32/kernel/ptrace.c ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) current 140 arch/unicore32/kernel/ptrace.c if (current->exit_code) { current 141 arch/unicore32/kernel/ptrace.c send_sig(current->exit_code, current, 1); current 142 arch/unicore32/kernel/ptrace.c current->exit_code = 0; current 105 arch/unicore32/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 172 arch/unicore32/kernel/signal.c err |= __put_user(current->thread.trap_no, current 174 arch/unicore32/kernel/signal.c err |= __put_user(current->thread.error_code, current 176 arch/unicore32/kernel/signal.c err |= __put_user(current->thread.address, current 195 arch/unicore32/kernel/signal.c sp = current->sas_ss_sp + current->sas_ss_size; current 103 arch/unicore32/kernel/stacktrace.c if (tsk != current) { current 124 arch/unicore32/kernel/stacktrace.c save_stack_trace_tsk(current, trace); current 146 arch/unicore32/kernel/traps.c tsk = current; current 151 arch/unicore32/kernel/traps.c } else if (tsk != current) { current 245 arch/unicore32/kernel/traps.c current->thread.error_code = err; current 246 arch/unicore32/kernel/traps.c current->thread.trap_no = trap; current 119 arch/unicore32/mm/fault.c struct task_struct *tsk = current; current 129 arch/unicore32/mm/fault.c struct task_struct *tsk = current; current 207 arch/unicore32/mm/fault.c tsk = current; current 253 arch/unicore32/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 460 arch/unicore32/mm/mmu.c pgd = current->active_mm->pgd; current 78 arch/x86/entry/common.c BUG_ON(regs != task_pt_regs(current)); current 155 arch/x86/entry/common.c klp_update_patch_state(current); current 367 arch/x86/entry/common.c unsigned long landing_pad = (unsigned long)current->mm->context.vdso + current 63 arch/x86/entry/vdso/vma.c (unsigned long)current->mm->context.vdso; current 76 arch/x86/entry/vdso/vma.c const struct vdso_image *image = current->mm->context.vdso_image; current 82 arch/x86/entry/vdso/vma.c current->mm->context.vdso = (void __user *)new_vma->vm_start; current 148 arch/x86/entry/vdso/vma.c struct mm_struct *mm = current->mm; current 191 arch/x86/entry/vdso/vma.c current->mm->context.vdso = (void __user *)text_start; current 192 arch/x86/entry/vdso/vma.c current->mm->context.vdso_image = image; current 247 arch/x86/entry/vdso/vma.c unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start); current 255 arch/x86/entry/vdso/vma.c struct mm_struct *mm = current->mm; current 80 arch/x86/entry/vsyscall/vsyscall_64.c level, current->comm, task_pid_nr(current), current 107 arch/x86/entry/vsyscall/vsyscall_64.c struct thread_struct *thread = ¤t->thread; current 175 arch/x86/entry/vsyscall/vsyscall_64.c tsk = current; current 239 arch/x86/entry/vsyscall/vsyscall_64.c prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err; current 240 arch/x86/entry/vsyscall/vsyscall_64.c current->thread.sig_on_uaccess_err = 1; current 264 arch/x86/entry/vsyscall/vsyscall_64.c current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err; current 2380 arch/x86/events/core.c unwind_start(&state, current, regs, NULL); current 2382 arch/x86/events/core.c unwind_start(&state, current, NULL, (void *)regs->sp); current 2407 arch/x86/events/core.c ldt = READ_ONCE(current->active_mm->context.ldt); current 860 arch/x86/events/intel/lbr.c if (!current->mm) current 77 arch/x86/ia32/ia32_aout.c current->mm->arg_start = (unsigned long) p; current 87 arch/x86/ia32/ia32_aout.c current->mm->arg_end = current->mm->env_start = (unsigned long) p; current 97 arch/x86/ia32/ia32_aout.c current->mm->env_end = (unsigned long) p; current 148 arch/x86/ia32/ia32_aout.c current->mm->end_code = ex.a_text + current 149 arch/x86/ia32/ia32_aout.c (current->mm->start_code = N_TXTADDR(ex)); current 150 arch/x86/ia32/ia32_aout.c current->mm->end_data = ex.a_data + current 151 arch/x86/ia32/ia32_aout.c (current->mm->start_data = N_DATADDR(ex)); current 152 arch/x86/ia32/ia32_aout.c current->mm->brk = ex.a_bss + current 153 arch/x86/ia32/ia32_aout.c (current->mm->start_brk = N_BSSADDR(ex)); current 225 arch/x86/ia32/ia32_aout.c error = set_brk(current->mm->start_brk, current->mm->brk); current 231 arch/x86/ia32/ia32_aout.c current->mm->start_stack = current 239 arch/x86/ia32/ia32_aout.c (regs)->sp = current->mm->start_stack; current 80 arch/x86/ia32/ia32_signal.c current->restart_block.fn = do_no_restart_syscall; current 200 arch/x86/ia32/ia32_signal.c put_user_ex(current->thread.trap_nr, &sc->trapno); current 201 arch/x86/ia32/ia32_signal.c put_user_ex(current->thread.error_code, &sc->err); current 212 arch/x86/ia32/ia32_signal.c put_user_ex(current->thread.cr2, &sc->cr2); current 292 arch/x86/ia32/ia32_signal.c if (current->mm->context.vdso) current 293 arch/x86/ia32/ia32_signal.c restorer = current->mm->context.vdso + current 372 arch/x86/ia32/ia32_signal.c restorer = current->mm->context.vdso + current 177 arch/x86/include/asm/compat.h (!!(task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)) current 202 arch/x86/include/asm/compat.h sp = task_pt_regs(current)->sp; current 205 arch/x86/include/asm/compat.h sp = task_pt_regs(current)->sp - 128; current 214 arch/x86/include/asm/compat.h if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT) current 186 arch/x86/include/asm/elf.h elf_common_init(¤t->thread, _r, 0) current 189 arch/x86/include/asm/elf.h elf_common_init(¤t->thread, regs, __USER_DS) current 332 arch/x86/include/asm/elf.h (unsigned long __force)current->mm->context.vdso); \ current 340 arch/x86/include/asm/elf.h (unsigned long __force)current->mm->context.vdso); \ current 355 arch/x86/include/asm/elf.h #define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) current 358 arch/x86/include/asm/elf.h ((unsigned long)current->mm->context.vdso + \ current 536 arch/x86/include/asm/fpu/internal.h struct fpu *fpu = ¤t->thread.fpu; current 539 arch/x86/include/asm/fpu/internal.h if (WARN_ON_ONCE(current->flags & PF_KTHREAD)) current 574 arch/x86/include/asm/fpu/internal.h if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) { current 610 arch/x86/include/asm/fpu/internal.h if (current->mm) { current 316 arch/x86/include/asm/mmu_context.h if (!current->mm) current 323 arch/x86/include/asm/mmu_context.h if (current->mm != vma->vm_mm) current 383 arch/x86/include/asm/mmu_context.h switch_mm_irqs_off(NULL, mm, current); current 405 arch/x86/include/asm/mmu_context.h switch_mm_irqs_off(NULL, prev_state.mm, current); current 39 arch/x86/include/asm/page_types.h (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ current 143 arch/x86/include/asm/pgtable.h pk = get_xsave_addr(¤t->thread.fpu.state.xsave, XFEATURE_PKRU); current 876 arch/x86/include/asm/processor.h #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ current 65 arch/x86/include/asm/resctrl_sched.h if (current->closid) current 66 arch/x86/include/asm/resctrl_sched.h closid = current->closid; current 70 arch/x86/include/asm/resctrl_sched.h if (current->rmid) current 71 arch/x86/include/asm/resctrl_sched.h rmid = current->rmid; current 85 arch/x86/include/asm/stackprotector.h current->stack_canary = canary; current 64 arch/x86/include/asm/stacktrace.h if (task == current) current 83 arch/x86/include/asm/stacktrace.h if (task == current) current 255 arch/x86/include/asm/tlbflush.h struct mm_struct *current_mm = current->mm; current 28 arch/x86/include/asm/uaccess.h #define get_fs() (current->thread.addr_limit) current 31 arch/x86/include/asm/uaccess.h current->thread.addr_limit = fs; current 37 arch/x86/include/asm/uaccess.h #define user_addr_max() (current->thread.addr_limit.seg) current 493 arch/x86/include/asm/uaccess.h current->thread.uaccess_err = 0; \ current 498 arch/x86/include/asm/uaccess.h current->thread.uaccess_err = 0; \ current 503 arch/x86/include/asm/uaccess.h (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \ current 110 arch/x86/include/asm/unwind.h if (task == current) \ current 120 arch/x86/include/asm/unwind.h return task != current && task->on_cpu; current 913 arch/x86/kernel/apm_32.c task_cputime(current, &utime, &stime); current 984 arch/x86/kernel/apm_32.c set_cpus_allowed_ptr(current, cpumask_of(0)); current 1435 arch/x86/kernel/apm_32.c DECLARE_WAITQUEUE(wait, current); current 1500 arch/x86/kernel/apm_32.c if (signal_pending(current)) current 1731 arch/x86/kernel/apm_32.c set_cpus_allowed_ptr(current, cpumask_of(0)); current 1212 arch/x86/kernel/cpu/bugs.c if (tsk == current) current 1875 arch/x86/kernel/cpu/common.c me = current; current 1952 arch/x86/kernel/cpu/common.c struct task_struct *curr = current; current 1502 arch/x86/kernel/cpu/resctrl/pseudo_lock.c if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { current 530 arch/x86/kernel/cpu/resctrl/rdtgroup.c current->closid = 0; current 531 arch/x86/kernel/cpu/resctrl/rdtgroup.c current->rmid = 0; current 654 arch/x86/kernel/cpu/resctrl/rdtgroup.c tsk = current; current 284 arch/x86/kernel/dumpstack.c task = task ? : current; current 290 arch/x86/kernel/dumpstack.c if (!sp && task == current) current 291 arch/x86/kernel/dumpstack.c sp = get_stack_pointer(current, NULL); current 298 arch/x86/kernel/dumpstack.c show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT); current 333 arch/x86/kernel/dumpstack.c if (regs && kexec_should_crash(current)) current 363 arch/x86/kernel/dumpstack.c kasan_unpoison_task_stack(current); current 392 arch/x86/kernel/dumpstack.c current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) current 423 arch/x86/kernel/dumpstack.c show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT); current 91 arch/x86/kernel/dumpstack_32.c task = task ? : current; current 96 arch/x86/kernel/dumpstack_32.c if (task != current) current 159 arch/x86/kernel/dumpstack_64.c task = task ? : current; current 164 arch/x86/kernel/dumpstack_64.c if (task != current) current 94 arch/x86/kernel/fpu/core.c if (!(current->flags & PF_KTHREAD) && current 101 arch/x86/kernel/fpu/core.c copy_fpregs_to_fpstate(¤t->thread.fpu); current 123 arch/x86/kernel/fpu/core.c WARN_ON_FPU(fpu != ¤t->thread.fpu); current 177 arch/x86/kernel/fpu/core.c WARN_ON_FPU(src_fpu != ¤t->thread.fpu); current 216 arch/x86/kernel/fpu/core.c WARN_ON_FPU(fpu != ¤t->thread.fpu); current 239 arch/x86/kernel/fpu/core.c if (fpu == ¤t->thread.fpu) current 261 arch/x86/kernel/fpu/core.c WARN_ON_FPU(fpu == ¤t->thread.fpu); current 280 arch/x86/kernel/fpu/core.c if (fpu == ¤t->thread.fpu) { current 323 arch/x86/kernel/fpu/core.c WARN_ON_FPU(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */ current 355 arch/x86/kernel/fpu/core.c struct fpu *fpu = ¤t->thread.fpu; current 367 arch/x86/kernel/fpu/core.c struct fpu *fpu = ¤t->thread.fpu; current 38 arch/x86/kernel/fpu/init.c fpstate_init_soft(¤t->thread.fpu.state.soft); current 251 arch/x86/kernel/fpu/regset.c if (tsk == current) { current 368 arch/x86/kernel/fpu/regset.c struct task_struct *tsk = current; current 166 arch/x86/kernel/fpu/signal.c struct task_struct *tsk = current; current 177 arch/x86/kernel/fpu/signal.c return fpregs_soft_get(current, NULL, 0, current 277 arch/x86/kernel/fpu/signal.c struct task_struct *tsk = current; current 296 arch/x86/kernel/fpu/signal.c return fpregs_soft_set(current, NULL, current 885 arch/x86/kernel/fpu/xstate.c struct fpu *fpu = ¤t->thread.fpu; current 1042 arch/x86/kernel/ftrace.c if (unlikely(atomic_read(¤t->tracing_graph_pause))) current 372 arch/x86/kernel/hw_breakpoint.c struct thread_struct *thread = ¤t->thread; current 388 arch/x86/kernel/hw_breakpoint.c dump->u_debugreg[6] = current->thread.debugreg6; current 417 arch/x86/kernel/hw_breakpoint.c set_debugreg(current->thread.debugreg6, 6); current 465 arch/x86/kernel/hw_breakpoint.c current->thread.debugreg6 &= ~DR_TRAP_BITS; current 512 arch/x86/kernel/hw_breakpoint.c if ((current->thread.debugreg6 & DR_TRAP_BITS) || current 29 arch/x86/kernel/ioport.c struct thread_struct *t = ¤t->thread; current 119 arch/x86/kernel/ioport.c struct thread_struct *t = ¤t->thread; current 629 arch/x86/kernel/kgdb.c struct task_struct *tsk = current; current 780 arch/x86/kernel/kprobes/core.c kretprobe_hash_lock(current, &head, &flags); current 806 arch/x86/kernel/kprobes/core.c if (ri->task != current) current 842 arch/x86/kernel/kprobes/core.c if (ri->task != current) current 867 arch/x86/kernel/kprobes/core.c kretprobe_hash_unlock(current, &flags); current 125 arch/x86/kernel/kvm.c n.halted = is_idle_task(current) || current 412 arch/x86/kernel/ldt.c struct mm_struct *mm = current->mm; current 466 arch/x86/kernel/ldt.c struct mm_struct *mm = current->mm; current 107 arch/x86/kernel/perf_regs.c regs_user->regs = task_pt_regs(current); current 108 arch/x86/kernel/perf_regs.c regs_user->abi = perf_reg_abi(current); current 136 arch/x86/kernel/perf_regs.c struct pt_regs *user_regs = task_pt_regs(current); current 137 arch/x86/kernel/process.c struct task_struct *tsk = current; current 264 arch/x86/kernel/process.c task_spec_ssb_noexec(current)) { current 266 arch/x86/kernel/process.c task_clear_spec_ssb_disable(current); current 267 arch/x86/kernel/process.c task_clear_spec_ssb_noexec(current); current 268 arch/x86/kernel/process.c speculation_ctrl_update(task_thread_info(current)->flags); current 486 arch/x86/kernel/process.c speculation_ctrl_update(speculation_ctrl_update_tif(current)); current 786 arch/x86/kernel/process.c if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current 807 arch/x86/kernel/process.c if (p == current || p->state == TASK_RUNNING) current 154 arch/x86/kernel/process_32.c tsk = current; current 305 arch/x86/kernel/process_32.c return do_arch_prctl_common(current, option, arg2); current 216 arch/x86/kernel/process_64.c save_fsgs(current); current 336 arch/x86/kernel/process_64.c if (task == current) current 350 arch/x86/kernel/process_64.c if (task == current) current 362 arch/x86/kernel/process_64.c WARN_ON_ONCE(task == current); current 369 arch/x86/kernel/process_64.c WARN_ON_ONCE(task == current); current 381 arch/x86/kernel/process_64.c struct task_struct *me = current; current 629 arch/x86/kernel/process_64.c task_pt_regs(current)->orig_ax = __NR_execve; current 633 arch/x86/kernel/process_64.c if (current->mm) current 634 arch/x86/kernel/process_64.c current->mm->context.ia32_compat = 0; current 640 arch/x86/kernel/process_64.c current->personality &= ~READ_IMPLIES_EXEC; current 648 arch/x86/kernel/process_64.c if (current->mm) current 649 arch/x86/kernel/process_64.c current->mm->context.ia32_compat = TIF_X32; current 650 arch/x86/kernel/process_64.c current->personality &= ~READ_IMPLIES_EXEC; current 659 arch/x86/kernel/process_64.c task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT; current 669 arch/x86/kernel/process_64.c if (current->mm) current 670 arch/x86/kernel/process_64.c current->mm->context.ia32_compat = TIF_IA32; current 671 arch/x86/kernel/process_64.c current->personality |= force_personality32; current 673 arch/x86/kernel/process_64.c task_pt_regs(current)->orig_ax = __NR_ia32_execve; current 719 arch/x86/kernel/process_64.c if (task == current) { current 749 arch/x86/kernel/process_64.c if (task == current) { current 803 arch/x86/kernel/process_64.c ret = do_arch_prctl_64(current, option, arg2); current 805 arch/x86/kernel/process_64.c ret = do_arch_prctl_common(current, option, arg2); current 813 arch/x86/kernel/process_64.c return do_arch_prctl_common(current, option, arg2); current 173 arch/x86/kernel/ptrace.c if (task == current) current 211 arch/x86/kernel/ptrace.c if (task == current) current 239 arch/x86/kernel/ptrace.c if (task == current) { current 246 arch/x86/kernel/ptrace.c if (task == current) { current 252 arch/x86/kernel/ptrace.c if (task == current) { current 258 arch/x86/kernel/ptrace.c if (task == current) { current 283 arch/x86/kernel/ptrace.c if (task == current) current 288 arch/x86/kernel/ptrace.c if (task == current) current 293 arch/x86/kernel/ptrace.c if (task == current) current 298 arch/x86/kernel/ptrace.c if (task == current) current 480 arch/x86/kernel/ptrace.c struct thread_struct *thread = &(current->thread); current 774 arch/x86/kernel/ptrace.c task_user_regset_view(current), current 781 arch/x86/kernel/ptrace.c task_user_regset_view(current), current 788 arch/x86/kernel/ptrace.c task_user_regset_view(current), current 795 arch/x86/kernel/ptrace.c task_user_regset_view(current), current 1165 arch/x86/kernel/ptrace.c task_user_regset_view(current), current 1172 arch/x86/kernel/ptrace.c task_user_regset_view(current), current 1179 arch/x86/kernel/ptrace.c task_user_regset_view(current), current 1186 arch/x86/kernel/ptrace.c task_user_regset_view(current), current 1337 arch/x86/kernel/ptrace.c struct task_struct *tsk = current; current 107 arch/x86/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 192 arch/x86/kernel/signal.c put_user_ex(current->thread.trap_nr, &sc->trapno); current 193 arch/x86/kernel/signal.c put_user_ex(current->thread.error_code, &sc->err); current 212 arch/x86/kernel/signal.c put_user_ex(current->thread.cr2, &sc->cr2); current 257 arch/x86/kernel/signal.c sp = current->sas_ss_sp + current->sas_ss_size; current 337 arch/x86/kernel/signal.c if (current->mm->context.vdso) current 338 arch/x86/kernel/signal.c restorer = current->mm->context.vdso + current 402 arch/x86/kernel/signal.c restorer = current->mm->context.vdso + current 714 arch/x86/kernel/signal.c struct fpu *fpu = ¤t->thread.fpu; current 720 arch/x86/kernel/signal.c if (syscall_get_nr(current, regs) >= 0) { current 722 arch/x86/kernel/signal.c switch (syscall_get_error(current, regs)) { current 748 arch/x86/kernel/signal.c user_disable_single_step(current); current 822 arch/x86/kernel/signal.c if (syscall_get_nr(current, regs) >= 0) { current 824 arch/x86/kernel/signal.c switch (syscall_get_error(current, regs)) { current 848 arch/x86/kernel/signal.c struct task_struct *me = current; current 853 arch/x86/kernel/signal.c task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG, current 190 arch/x86/kernel/step.c if (task == current) current 36 arch/x86/kernel/sys_x86_64.c if (!(current->flags & PF_RANDOMIZE)) current 118 arch/x86/kernel/sys_x86_64.c if (current->flags & PF_RANDOMIZE) { current 135 arch/x86/kernel/sys_x86_64.c struct mm_struct *mm = current->mm; current 179 arch/x86/kernel/sys_x86_64.c struct mm_struct *mm = current->mm; current 23 arch/x86/kernel/tls.c struct thread_struct *t = ¤t->thread; current 105 arch/x86/kernel/tls.c if (t == ¤t->thread) current 154 arch/x86/kernel/tls.c if (p == current) { current 193 arch/x86/kernel/tls.c return do_set_thread_area(current, -1, u_info, 1); current 245 arch/x86/kernel/tls.c return do_get_thread_area(current, -1, u_info); current 248 arch/x86/kernel/traps.c struct task_struct *tsk = current; current 305 arch/x86/kernel/traps.c (void *)fault_address, current->stack, current 306 arch/x86/kernel/traps.c (char *)current->stack + THREAD_SIZE - 1); current 319 arch/x86/kernel/traps.c struct task_struct *tsk = current; current 472 arch/x86/kernel/traps.c struct task_struct *tsk = current; current 537 arch/x86/kernel/traps.c tsk = current; current 711 arch/x86/kernel/traps.c struct task_struct *tsk = current; current 820 arch/x86/kernel/traps.c struct task_struct *task = current; current 120 arch/x86/kernel/umip.c struct task_struct *tsk = current; current 285 arch/x86/kernel/umip.c struct task_struct *tsk = current; current 326 arch/x86/kernel/unwind_frame.c if (state->task != current) current 322 arch/x86/kernel/unwind_orc.c if (task != current && state->sp == task->thread.sp) { current 643 arch/x86/kernel/unwind_orc.c } else if (task == current) { current 472 arch/x86/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 483 arch/x86/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 555 arch/x86/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 914 arch/x86/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 923 arch/x86/kernel/uprobes.c utask->autask.saved_trap_nr = current->thread.trap_nr; current 924 arch/x86/kernel/uprobes.c current->thread.trap_nr = UPROBE_TRAP_NR; current 928 arch/x86/kernel/uprobes.c if (test_tsk_thread_flag(current, TIF_BLOCKSTEP)) current 929 arch/x86/kernel/uprobes.c set_task_blockstep(current, false); current 961 arch/x86/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 965 arch/x86/kernel/uprobes.c WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); current 966 arch/x86/kernel/uprobes.c current->thread.trap_nr = utask->autask.saved_trap_nr; current 988 arch/x86/kernel/uprobes.c send_sig(SIGTRAP, current, 0); current 1032 arch/x86/kernel/uprobes.c struct uprobe_task *utask = current->utask; current 1037 arch/x86/kernel/uprobes.c current->thread.trap_nr = utask->autask.saved_trap_nr; current 1055 arch/x86/kernel/uprobes.c send_sig(SIGTRAP, current, 0); current 1078 arch/x86/kernel/uprobes.c current->pid, regs->sp, regs->ip); current 87 arch/x86/kernel/vm86_32.c #define VFLAGS (*(unsigned short *)&(current->thread.vm86->veflags)) current 88 arch/x86/kernel/vm86_32.c #define VEFLAGS (current->thread.vm86->veflags) current 98 arch/x86/kernel/vm86_32.c struct task_struct *tsk = current; current 100 arch/x86/kernel/vm86_32.c struct vm86 *vm86 = current->thread.vm86; current 241 arch/x86/kernel/vm86_32.c struct task_struct *tsk = current; current 268 arch/x86/kernel/vm86_32.c current->comm, task_pid_nr(current), current 422 arch/x86/kernel/vm86_32.c set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask); current 432 arch/x86/kernel/vm86_32.c set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask); current 447 arch/x86/kernel/vm86_32.c return flags | (VEFLAGS & current->thread.vm86->veflags_mask); current 542 arch/x86/kernel/vm86_32.c struct vm86 *vm86 = current->thread.vm86; current 572 arch/x86/kernel/vm86_32.c struct vm86 *vm86 = current->thread.vm86; current 584 arch/x86/kernel/vm86_32.c current->thread.trap_nr = trapno; current 585 arch/x86/kernel/vm86_32.c current->thread.error_code = error_code; current 597 arch/x86/kernel/vm86_32.c struct vm86plus_info_struct *vmpi = ¤t->thread.vm86->vm86plus; current 827 arch/x86/kernel/vm86_32.c if (vm86_irqs[irqnumber].tsk != current) return 0; current 861 arch/x86/kernel/vm86_32.c vm86_irqs[irq].tsk = current; current 867 arch/x86/kernel/vm86_32.c if (vm86_irqs[irqnumber].tsk != current) return -EPERM; current 1075 arch/x86/kvm/hyperv.c task_cputime_adjusted(current, &utime, &stime); current 666 arch/x86/kvm/i8254.c pid = get_pid(task_tgid(current)); current 3385 arch/x86/kvm/mmu.c kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current); current 153 arch/x86/kvm/paging_tmpl.h down_read(¤t->mm->mmap_sem); current 154 arch/x86/kvm/paging_tmpl.h vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE); current 156 arch/x86/kvm/paging_tmpl.h up_read(¤t->mm->mmap_sem); current 163 arch/x86/kvm/paging_tmpl.h up_read(¤t->mm->mmap_sem); current 168 arch/x86/kvm/paging_tmpl.h up_read(¤t->mm->mmap_sem); current 130 arch/x86/kvm/pmu.c event = perf_event_create_kernel_counter(&attr, -1, current, current 2362 arch/x86/kvm/svm.c wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase); current 1149 arch/x86/kvm/vmx/vmx.c if (likely(is_64bit_mm(current->mm))) { current 1151 arch/x86/kvm/vmx/vmx.c fs_sel = current->thread.fsindex; current 1152 arch/x86/kvm/vmx/vmx.c gs_sel = current->thread.gsindex; current 1153 arch/x86/kvm/vmx/vmx.c fs_base = current->thread.fsbase; current 1154 arch/x86/kvm/vmx/vmx.c vmx->msr_host_kernel_gs_base = current->thread.gsbase; current 5263 arch/x86/kvm/vmx/vmx.c if (signal_pending(current)) current 2681 arch/x86/kvm/x86.c st->steal += current->sched_info.run_delay - current 2683 arch/x86/kvm/x86.c vcpu->arch.st.last_steal = current->sched_info.run_delay; current 8218 arch/x86/kvm/x86.c || need_resched() || signal_pending(current)) { current 8415 arch/x86/kvm/x86.c if (signal_pending(current)) { current 8520 arch/x86/kvm/x86.c memcpy(&fpu->state, ¤t->thread.fpu.state, current 8576 arch/x86/kvm/x86.c if (signal_pending(current)) { current 9693 arch/x86/kvm/x86.c if (current->mm == kvm->mm) { current 586 arch/x86/lib/insn-eval.c mutex_lock(¤t->active_mm->context.lock); current 587 arch/x86/lib/insn-eval.c ldt = current->active_mm->context.ldt; current 593 arch/x86/lib/insn-eval.c mutex_unlock(¤t->active_mm->context.lock); current 56 arch/x86/math-emu/fpu_aux.c fpstate_init_soft(¤t->thread.fpu.state.soft); current 231 arch/x86/math-emu/fpu_entry.c current->thread.trap_nr = X86_TRAP_MF; current 232 arch/x86/math-emu/fpu_entry.c current->thread.error_code = 0; current 233 arch/x86/math-emu/fpu_entry.c send_sig(SIGFPE, current, 1); current 624 arch/x86/math-emu/fpu_entry.c current->thread.trap_nr = X86_TRAP_MF; current 625 arch/x86/math-emu/fpu_entry.c current->thread.error_code = 0; current 626 arch/x86/math-emu/fpu_entry.c send_sig(signal, current, 1); current 30 arch/x86/math-emu/fpu_system.h mutex_lock(¤t->mm->context.lock); current 31 arch/x86/math-emu/fpu_system.h if (current->mm->context.ldt && seg < current->mm->context.ldt->nr_entries) current 32 arch/x86/math-emu/fpu_system.h ret = current->mm->context.ldt->entries[seg]; current 33 arch/x86/math-emu/fpu_system.h mutex_unlock(¤t->mm->context.lock); current 76 arch/x86/math-emu/fpu_system.h #define I387 (¤t->thread.fpu.state) current 18 arch/x86/mm/debug_pagetables.c if (current->mm->pgd) { current 19 arch/x86/mm/debug_pagetables.c down_read(¤t->mm->mmap_sem); current 20 arch/x86/mm/debug_pagetables.c ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false); current 21 arch/x86/mm/debug_pagetables.c up_read(¤t->mm->mmap_sem); current 31 arch/x86/mm/debug_pagetables.c if (current->mm->pgd) { current 32 arch/x86/mm/debug_pagetables.c down_read(¤t->mm->mmap_sem); current 33 arch/x86/mm/debug_pagetables.c ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true); current 34 arch/x86/mm/debug_pagetables.c up_read(¤t->mm->mmap_sem); current 138 arch/x86/mm/extable.c current->thread.uaccess_err = 1; current 125 arch/x86/mm/fault.c instr = (void *)convert_ip_to_linear(current, regs); current 693 arch/x86/mm/fault.c tsk = current; current 709 arch/x86/mm/fault.c struct task_struct *tsk = current; current 732 arch/x86/mm/fault.c struct task_struct *tsk = current; current 761 arch/x86/mm/fault.c if (current->thread.sig_on_uaccess_err && signal) { current 891 arch/x86/mm/fault.c struct task_struct *tsk = current; current 948 arch/x86/mm/fault.c struct mm_struct *mm = current->mm; current 1037 arch/x86/mm/fault.c struct task_struct *tsk = current; current 1058 arch/x86/mm/fault.c if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) { current 1314 arch/x86/mm/fault.c tsk = current; current 1519 arch/x86/mm/fault.c prefetchw(¤t->mm->mmap_sem); current 148 arch/x86/mm/hugetlbpage.c struct mm_struct *mm = current->mm; current 42 arch/x86/mm/mmap.c if (current->flags & PF_RANDOMIZE) { current 62 arch/x86/mm/mmap.c if (current->personality & ADDR_COMPAT_LAYOUT) current 70 arch/x86/mm/mmap.c if (!(current->flags & PF_RANDOMIZE)) current 153 arch/x86/mm/mmap.c struct mm_struct *mm = current->mm; current 48 arch/x86/mm/mpx.c struct mm_struct *mm = current->mm; current 215 arch/x86/mm/mpx.c struct mm_struct *mm = current->mm; current 236 arch/x86/mm/mpx.c current->comm, current->pid); current 250 arch/x86/mm/mpx.c struct mm_struct *mm = current->mm; current 377 arch/x86/mm/mpx.c struct mm_struct *mm = current->mm; current 408 arch/x86/mm/mpx.c if (!kernel_managing_mpx_tables(current->mm)) current 894 arch/x86/mm/mpx.c if (!kernel_managing_mpx_tables(current->mm)) current 922 arch/x86/mm/mpx.c if (!kernel_managing_mpx_tables(current->mm)) current 658 arch/x86/mm/pat.c current->comm, current->pid, start, end - 1); current 866 arch/x86/mm/pat.c current->comm, current->pid, current 901 arch/x86/mm/pat.c current->comm, current->pid, current 922 arch/x86/mm/pat.c current->comm, current->pid, current 161 arch/x86/mm/pat_rbtree.c current->comm, current->pid, start, end, current 46 arch/x86/mm/pkeys.c ret = arch_set_user_pkey_access(current, execute_only_pkey, current 52 arch/x86/oprofile/backtrace.c if (!current || !test_thread_flag(TIF_IA32)) current 107 arch/x86/oprofile/backtrace.c for (unwind_start(&state, current, regs, NULL); current 630 arch/x86/platform/efi/efi_64.c efi_scratch.prev_mm = current->active_mm; current 631 arch/x86/platform/efi/efi_64.c current->active_mm = mm; current 671 arch/x86/platform/uv/uv_nmi.c cpu, current->pid, current->comm, (void *)regs->ip); current 688 arch/x86/platform/uv/uv_nmi.c if (current->pid != 0 || !uv_nmi_action_is("ips")) current 177 arch/x86/power/cpu.c load_mm_ldt(current->active_mm); /* This does lldt */ current 171 arch/x86/um/asm/elf.h (pr_reg)[21] = current->thread.arch.fs; \ current 19 arch/x86/um/asm/processor.h (address + 65536 + 32 * sizeof(unsigned long) >= UPT_SP(¤t->thread.regs.regs)) current 14 arch/x86/um/asm/vm-flags.h ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ current 58 arch/x86/um/ldt.c uml_ldt_t *ldt = ¤t->mm->context.arch.ldt; current 123 arch/x86/um/ldt.c uml_ldt_t *ldt = ¤t->mm->context.arch.ldt; current 124 arch/x86/um/ldt.c struct mm_id * mm_idp = ¤t->mm->context.id; current 39 arch/x86/um/ptrace_32.c n = access_process_vm(current, addr, &instr, sizeof(instr), current 215 arch/x86/um/ptrace_64.c n = access_process_vm(current, addr, &instr, sizeof(instr), current 160 arch/x86/um/signal.c current->restart_block.fn = do_no_restart_syscall; current 241 arch/x86/um/signal.c struct faultinfo * fi = ¤t->thread.arch.faultinfo; current 333 arch/x86/um/signal.c err |= copy_sc_to_user(&uc->uc_mcontext, fp, ¤t->thread.regs, 0); current 454 arch/x86/um/signal.c unsigned long sp = PT_REGS_SP(¤t->thread.regs); current 466 arch/x86/um/signal.c if (copy_sc_from_user(¤t->thread.regs, sc)) current 470 arch/x86/um/signal.c PT_REGS_SYSCALL_NR(¤t->thread.regs) = -1; current 471 arch/x86/um/signal.c return PT_REGS_SYSCALL_RET(¤t->thread.regs); current 561 arch/x86/um/signal.c unsigned long sp = PT_REGS_SP(¤t->thread.regs); current 572 arch/x86/um/signal.c if (copy_sc_from_user(¤t->thread.regs, &uc->uc_mcontext)) current 576 arch/x86/um/signal.c PT_REGS_SYSCALL_NR(¤t->thread.regs) = -1; current 577 arch/x86/um/signal.c return PT_REGS_SYSCALL_RET(¤t->thread.regs); current 38 arch/x86/um/syscalls_64.c ret = restore_registers(pid, ¤t->thread.regs.regs); current 61 arch/x86/um/syscalls_64.c current->thread.arch.fs = (unsigned long) ptr; current 62 arch/x86/um/syscalls_64.c ret = save_registers(pid, ¤t->thread.regs.regs); current 65 arch/x86/um/syscalls_64.c ret = save_registers(pid, ¤t->thread.regs.regs); current 80 arch/x86/um/syscalls_64.c return arch_prctl(current, option, (unsigned long __user *) arg2); current 20 arch/x86/um/sysrq_64.c printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current), current 21 arch/x86/um/sysrq_64.c current->comm, print_tainted(), init_utsname()->release); current 259 arch/x86/um/tls_32.c if (unlikely(task == current && current 262 arch/x86/um/tls_32.c "without flushed TLS.", current->pid); current 291 arch/x86/um/tls_32.c idx = get_free_idx(current); current 303 arch/x86/um/tls_32.c return set_tls_entry(current, &info, idx, 1); current 336 arch/x86/um/tls_32.c ret = get_tls_entry(current, &info, idx); current 56 arch/x86/um/vdso/vma.c struct mm_struct *mm = current->mm; current 178 arch/xtensa/include/asm/elf.h set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK))) current 36 arch/xtensa/include/asm/stackprotector.h current->stack_canary = canary; current 37 arch/xtensa/include/asm/stackprotector.h __stack_chk_guard = current->stack_canary; current 24 arch/xtensa/include/asm/stacktrace.h if (!task || task == current) current 35 arch/xtensa/include/asm/uaccess.h #define get_fs() (current->thread.current_ds) current 36 arch/xtensa/include/asm/uaccess.h #define set_fs(val) (current->thread.current_ds = (val)) current 147 arch/xtensa/kernel/process.c flush_ptrace_hw_breakpoint(current); current 306 arch/xtensa/kernel/process.c if (!p || p == current || p->state == TASK_RUNNING) current 374 arch/xtensa/kernel/ptrace.c if (current->thread.ptrace_bp[i] == bp) current 379 arch/xtensa/kernel/ptrace.c if (current->thread.ptrace_wp[i] == bp) current 552 arch/xtensa/kernel/ptrace.c trace_sys_enter(regs, syscall_get_nr(current, regs)); current 247 arch/xtensa/kernel/signal.c current->restart_block.fn = do_no_restart_syscall; current 344 arch/xtensa/kernel/signal.c sp = current->sas_ss_sp + current->sas_ss_size; current 411 arch/xtensa/kernel/signal.c current->comm, current->pid, sig, frame, regs->pc); current 429 arch/xtensa/kernel/signal.c task_pt_regs(current)->icountlevel = 0; current 468 arch/xtensa/kernel/signal.c if (current->ptrace & PT_SINGLESTEP) current 469 arch/xtensa/kernel/signal.c task_pt_regs(current)->icountlevel = 1; current 494 arch/xtensa/kernel/signal.c if (current->ptrace & PT_SINGLESTEP) current 495 arch/xtensa/kernel/signal.c task_pt_regs(current)->icountlevel = 1; current 148 arch/xtensa/kernel/smp.c current->active_mm = mm; current 150 arch/xtensa/kernel/smp.c enter_lazy_tlb(mm, current); current 234 arch/xtensa/kernel/stacktrace.c save_stack_trace_tsk(current, trace); current 86 arch/xtensa/kernel/syscall.c for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { current 189 arch/xtensa/kernel/traps.c current->comm, task_pid_nr(current), regs->pc, current 312 arch/xtensa/kernel/traps.c current->comm, task_pid_nr(current), regs->pc); current 331 arch/xtensa/kernel/traps.c current->thread.bad_vaddr = regs->excvaddr; current 332 arch/xtensa/kernel/traps.c current->thread.error_code = -3; current 335 arch/xtensa/kernel/traps.c regs->excvaddr, current->comm, current 336 arch/xtensa/kernel/traps.c task_pid_nr(current), regs->pc); current 39 arch/xtensa/mm/fault.c struct mm_struct *mm = current->mm; current 70 arch/xtensa/mm/fault.c current->comm, current->pid, current 113 arch/xtensa/mm/fault.c if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) current 127 arch/xtensa/mm/fault.c current->maj_flt++; current 129 arch/xtensa/mm/fault.c current->min_flt++; current 158 arch/xtensa/mm/fault.c current->thread.bad_vaddr = address; current 159 arch/xtensa/mm/fault.c current->thread.error_code = is_write; current 184 arch/xtensa/mm/fault.c current->thread.bad_vaddr = address; current 197 arch/xtensa/mm/fault.c struct mm_struct *act_mm = current->active_mm; current 241 arch/xtensa/mm/fault.c current->comm, regs->pc, entry->fixup); current 242 arch/xtensa/mm/fault.c current->thread.bad_uaddr = address; current 104 arch/xtensa/mm/kasan_init.c current->kasan_depth = 0; current 67 arch/xtensa/mm/tlb.c if (mm == current->active_mm) { current 2226 block/bfq-iosched.c struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q); current 4970 block/bfq-iosched.c struct task_struct *tsk = current; current 5155 block/bfq-iosched.c bfq_init_bfqq(bfqd, bfqq, bic, current->pid, current 5986 block/bfq-iosched.c bfqq->pid = current->pid; current 378 block/bio.c while ((bio = bio_list_pop(¤t->bio_list[0]))) current 380 block/bio.c current->bio_list[0] = nopunt; current 383 block/bio.c while ((bio = bio_list_pop(¤t->bio_list[1]))) current 385 block/bio.c current->bio_list[1] = nopunt; current 474 block/bio.c if (current->bio_list && current 475 block/bio.c (!bio_list_empty(¤t->bio_list[0]) || current 476 block/bio.c !bio_list_empty(¤t->bio_list[1])) && current 1250 block/bio.c if (!current->mm) current 1730 block/blk-cgroup.c } while (!fatal_signal_pending(current)); current 1749 block/blk-cgroup.c struct request_queue *q = current->throttle_queue; current 1753 block/blk-cgroup.c bool use_memdelay = current->use_memdelay; current 1758 block/blk-cgroup.c current->throttle_queue = NULL; current 1759 block/blk-cgroup.c current->use_memdelay = false; current 1766 block/blk-cgroup.c blkcg = css_to_blkcg(task_css(current, io_cgrp_id)); current 1805 block/blk-cgroup.c if (unlikely(current->flags & PF_KTHREAD)) current 1811 block/blk-cgroup.c if (current->throttle_queue) current 1812 block/blk-cgroup.c blk_put_queue(current->throttle_queue); current 1813 block/blk-cgroup.c current->throttle_queue = q; current 1815 block/blk-cgroup.c current->use_memdelay = use_memdelay; current 1816 block/blk-cgroup.c set_notify_resume(current); current 1031 block/blk-core.c if (current->bio_list) { current 1032 block/blk-core.c bio_list_add(¤t->bio_list[0], bio); current 1052 block/blk-core.c current->bio_list = bio_list_on_stack; current 1091 block/blk-core.c current->bio_list = NULL; /* deactivate */ current 1174 block/blk-core.c current->comm, task_pid_nr(current), current 1698 block/blk-core.c struct task_struct *tsk = current; current 1739 block/blk-core.c struct blk_plug *plug = current->plug; current 1781 block/blk-core.c if (plug != current->plug) current 1785 block/blk-core.c current->plug = NULL; current 278 block/blk-ioc.c (task == current || !(task->flags & PF_EXITING))) current 1766 block/blk-iocost.c if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) { current 1789 block/blk-iocost.c wait.wait.private = current; current 298 block/blk-iolatency.c if (issue_as_root || fatal_signal_pending(current)) { current 44 block/blk-mq-sched.c ioc = current->io_context; current 3456 block/blk-mq.c } while (hs.task && !signal_pending(current)); current 3509 block/blk-mq.c if (current->plug) current 3510 block/blk-mq.c blk_flush_plug_list(current->plug, false); current 3526 block/blk-mq.c state = current->state; current 3539 block/blk-mq.c if (signal_pending_state(state, current)) current 3542 block/blk-mq.c if (current->state == TASK_RUNNING) current 263 block/blk-mq.h return current->plug; current 258 block/blk-rq-qos.c .task = current, current 303 block/blk.h if (unlikely(!current->io_context)) current 304 block/blk.h create_task_io_context(current, gfp_mask, node); current 305 block/blk.h return current->io_context; current 111 block/ioprio.c p = current; current 119 block/ioprio.c pgrp = task_pgrp(current); current 199 block/ioprio.c p = current; current 207 block/ioprio.c pgrp = task_pgrp(current); current 654 block/scsi_ioctl.c printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm); current 700 crypto/af_alg.c if (signal_pending(current)) current 758 crypto/af_alg.c if (signal_pending(current)) current 79 crypto/algboss.c } while (err == -EAGAIN && !signal_pending(current)); current 438 crypto/api.c if (fatal_signal_pending(current)) { current 549 crypto/api.c if (fatal_signal_pending(current)) { current 86 drivers/acpi/acpi_dbg.c current != acpi_aml_io.thread) current 123 drivers/acpi/acpi_pad.c set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); current 144 drivers/acpi/acpi_pad.c sched_setscheduler(current, SCHED_RR, ¶m); current 1050 drivers/android/binder.c if (can_nice(current, nice)) { current 1051 drivers/android/binder.c set_user_nice(current, nice); current 1057 drivers/android/binder.c current->pid, nice, min_nice); current 1058 drivers/android/binder.c set_user_nice(current, min_nice); current 1061 drivers/android/binder.c binder_user_error("%d RLIMIT_NICE not set\n", current->pid); current 1154 drivers/android/binder.c proc->pid, current->pid, node->debug_id, current 2257 drivers/android/binder.c task_work_add(current, &twcb->twork, true); current 2351 drivers/android/binder.c proc->tsk == current->group_leader); current 2368 drivers/android/binder.c if (proc->tsk != current->group_leader) { current 3108 drivers/android/binder.c t->priority = task_nice(current); current 4098 drivers/android/binder.c if (signal_pending(current)) { current 4436 drivers/android/binder.c t->saved_priority = task_nice(current); current 4459 drivers/android/binder.c task_active_pid_ns(current)); current 4641 drivers/android/binder.c if (current->pid < thread->pid) current 4643 drivers/android/binder.c else if (current->pid > thread->pid) current 4653 drivers/android/binder.c thread->pid = current->pid; current 5127 drivers/android/binder.c pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); current 5173 drivers/android/binder.c if (proc->tsk != current->group_leader) current 5213 drivers/android/binder.c current->group_leader->pid, current->pid); current 5220 drivers/android/binder.c get_task_struct(current->group_leader); current 5221 drivers/android/binder.c proc->tsk = current->group_leader; current 5223 drivers/android/binder.c proc->default_priority = task_nice(current); current 5238 drivers/android/binder.c proc->pid = current->group_leader->pid; current 1005 drivers/android/binder_alloc.c alloc->pid = current->group_leader->pid; current 690 drivers/android/binderfs.c info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns); current 7135 drivers/ata/libata-core.c bool owns_eh = ap && ap->host->eh_owner == current; current 466 drivers/ata/libata-eh.c ap->host->eh_owner = current; current 481 drivers/ata/libata-eh.c WARN_ON_ONCE(ap->host->eh_owner != current); current 525 drivers/atm/ambassador.h #define NEXTQ(current,start,limit) \ current 526 drivers/atm/ambassador.h ( (current)+1 < (limit) ? (current)+1 : (start) ) current 46 drivers/atm/atmtcp.c DECLARE_WAITQUEUE(wait,current); current 843 drivers/atm/eni.c DECLARE_WAITQUEUE(wait,current); current 1394 drivers/atm/eni.c DECLARE_WAITQUEUE(wait,current); current 2334 drivers/atm/he.c DECLARE_WAITQUEUE(wait, current); current 1067 drivers/atm/horizon.c if (signal_pending (current)) current 202 drivers/atm/solos-pci.c "L%05d\n%s\n", current->pid, attr->attr.name); current 210 drivers/atm/solos-pci.c prm.pid = current->pid; current 258 drivers/atm/solos-pci.c "L%05d\n%s\n%s\n", current->pid, attr->attr.name, buf); current 266 drivers/atm/solos-pci.c prm.pid = current->pid; current 209 drivers/base/power/main.c task_pid_nr(current), current 533 drivers/base/power/main.c wd->tsk = current; current 925 drivers/base/power/wakeup.c if (inpr == 0 || signal_pending(current)) current 1230 drivers/block/aoe/aoecmd.c DECLARE_WAITQUEUE(wait, current); current 1234 drivers/block/aoe/aoecmd.c current->flags |= PF_NOFREEZE; current 1235 drivers/block/aoe/aoecmd.c set_user_nice(current, -10); current 186 drivers/block/drbd/drbd_actlog.c current->comm, current->pid, __func__, current 193 drivers/block/drbd/drbd_actlog.c current->comm, current->pid, __func__, current 119 drivers/block/drbd/drbd_bitmap.c current->comm, task_pid_nr(current), current 138 drivers/block/drbd/drbd_bitmap.c current->comm, task_pid_nr(current), current 148 drivers/block/drbd/drbd_bitmap.c b->bm_task = current; current 321 drivers/block/drbd/drbd_main.c snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s", current 355 drivers/block/drbd/drbd_main.c drbd_info(resource, "Terminating %s\n", current->comm); current 391 drivers/block/drbd/drbd_main.c thi->name, current->comm, current->pid); current 408 drivers/block/drbd/drbd_main.c flush_signals(current); /* otherw. may get -ERESTARTNOINTR */ current 431 drivers/block/drbd/drbd_main.c thi->name, current->comm, current->pid); current 469 drivers/block/drbd/drbd_main.c if (thi->task != current) current 542 drivers/block/drbd/drbd_main.c struct task_struct *p = current; current 1491 drivers/block/drbd/drbd_main.c current->comm, current->pid, connection->ko_count); current 1870 drivers/block/drbd/drbd_main.c flush_signals(current); current 3620 drivers/block/drbd/drbd_main.c D_ASSERT(device, current == first_peer_device(device)->connection->worker.task); current 3662 drivers/block/drbd/drbd_main.c D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); current 3807 drivers/block/drbd/drbd_main.c if (signal_pending(current)) current 369 drivers/block/drbd/drbd_nl.c if (current == connection->worker.task) current 398 drivers/block/drbd/drbd_nl.c if (current == connection->worker.task) current 607 drivers/block/drbd/drbd_nl.c flush_signals(current); current 296 drivers/block/drbd/drbd_receiver.c if (signal_pending(current)) { current 562 drivers/block/drbd/drbd_receiver.c if (err && !signal_pending(current)) current 1011 drivers/block/drbd/drbd_receiver.c if (signal_pending(current)) { current 1012 drivers/block/drbd/drbd_receiver.c flush_signals(current); current 1191 drivers/block/drbd/drbd_receiver.c if (current->plug == &connection->receiver_plug) { current 2398 drivers/block/drbd/drbd_receiver.c if (signal_pending(current)) { current 6024 drivers/block/drbd/drbd_receiver.c rv = sched_setscheduler(current, SCHED_RR, ¶m); current 6088 drivers/block/drbd/drbd_receiver.c flush_signals(current); current 625 drivers/block/drbd/drbd_state.c D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); current 1526 drivers/block/drbd/drbd_state.c D_ASSERT(device, current == first_peer_device(device)->connection->worker.task); current 1780 drivers/block/drbd/drbd_worker.c if (current == connection->worker.task) { current 2112 drivers/block/drbd/drbd_worker.c if (!list_empty(work_list) || signal_pending(current)) { current 2183 drivers/block/drbd/drbd_worker.c if (signal_pending(current)) { current 2184 drivers/block/drbd/drbd_worker.c flush_signals(current); current 918 drivers/block/loop.c current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO; current 1018 drivers/block/nbd.c nbd->task_setup = current; current 1021 drivers/block/nbd.c (nbd->task_setup != current || current 1250 drivers/block/nbd.c nbd->task_recv = current; current 1433 drivers/block/pktcdvd.c set_user_nice(current, MIN_NICE); current 1437 drivers/block/pktcdvd.c DECLARE_WAITQUEUE(wait, current); current 3096 drivers/block/skd_main.c bdev->bd_disk->disk_name, current->comm); current 330 drivers/block/swim.c current->state = TASK_INTERRUPTIBLE; current 349 drivers/block/swim.c current->state = TASK_INTERRUPTIBLE; current 373 drivers/block/swim.c current->state = TASK_INTERRUPTIBLE; current 849 drivers/block/swim3.c if (signal_pending(current)) { current 930 drivers/block/swim3.c if (signal_pending(current)) { current 1034 drivers/block/swim3.c if (signal_pending(current)) current 599 drivers/block/xen-blkback/blkback.c current->comm, ring->st_oo_req, current 603 drivers/bluetooth/btmrvl_main.c init_waitqueue_entry(&wait, current); current 1105 drivers/cdrom/cdrom.c (unsigned int)task_pid_nr(current)); current 592 drivers/char/agp/frontend.c client = agp_find_client_by_pid(current->pid); current 692 drivers/char/agp/frontend.c priv->my_pid = current->pid; current 698 drivers/char/agp/frontend.c client = agp_find_client_by_pid(current->pid); current 517 drivers/char/agp/generic.c printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm); current 595 drivers/char/agp/generic.c printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm); current 599 drivers/char/agp/generic.c printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4); current 629 drivers/char/agp/generic.c current->comm, *requested_mode); current 640 drivers/char/agp/generic.c current->comm, *requested_mode); current 649 drivers/char/agp/generic.c printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm); current 655 drivers/char/agp/generic.c printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm); current 368 drivers/char/applicom.c DECLARE_WAITQUEUE(wait, current); current 447 drivers/char/applicom.c if (signal_pending(current)) { current 545 drivers/char/applicom.c DECLARE_WAITQUEUE(wait, current); current 612 drivers/char/applicom.c if (signal_pending(current)) current 298 drivers/char/hpet.c DECLARE_WAITQUEUE(wait, current); current 325 drivers/char/hpet.c } else if (signal_pending(current)) { current 263 drivers/char/hw_random/core.c if (signal_pending(current)) { current 77 drivers/char/hw_random/s390-trng.c if (signal_pending(current)) { current 994 drivers/char/ipmi/ipmi_si_intf.c set_user_nice(current, MAX_NICE); current 797 drivers/char/ipmi/ipmi_watchdog.c init_waitqueue_entry(&wait, current); current 807 drivers/char/ipmi/ipmi_watchdog.c if (signal_pending(current)) { current 291 drivers/char/lp.c if (signal_pending(current)) { current 352 drivers/char/lp.c if (signal_pending(current)) { current 475 drivers/char/lp.c if (signal_pending(current)) { current 104 drivers/char/mem.c return fatal_signal_pending(current); current 718 drivers/char/mem.c if (signal_pending(current)) current 752 drivers/char/mem.c return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); current 333 drivers/char/mwave/mwavedd.c DECLARE_WAITQUEUE(wait, current); current 929 drivers/char/pcmcia/cm4000_cs.c DEBUGP(2, dev, "-> cmm_read(%s,%d)\n", current->comm, current->pid); current 1059 drivers/char/pcmcia/cm4000_cs.c DEBUGP(2, dev, "-> cmm_write(%s,%d)\n", current->comm, current->pid); current 1658 drivers/char/pcmcia/cm4000_cs.c imajor(inode), minor, current->comm, current->pid); current 226 drivers/char/pcmcia/cm4040_cs.c DEBUGP(2, dev, "-> cm4040_read(%s,%d)\n", current->comm, current->pid); current 326 drivers/char/pcmcia/cm4040_cs.c DEBUGP(2, dev, "-> cm4040_write(%s,%d)\n", current->comm, current->pid); current 1962 drivers/char/pcmcia/synclink_cs.c DECLARE_WAITQUEUE(wait, current); current 2002 drivers/char/pcmcia/synclink_cs.c if (signal_pending(current)) { current 2067 drivers/char/pcmcia/synclink_cs.c DECLARE_WAITQUEUE(wait, current); current 2078 drivers/char/pcmcia/synclink_cs.c if (signal_pending(current)) { current 2397 drivers/char/pcmcia/synclink_cs.c if (signal_pending(current)) current 2406 drivers/char/pcmcia/synclink_cs.c if (signal_pending(current)) current 175 drivers/char/ppdev.c if (signal_pending(current)) { current 257 drivers/char/ppdev.c if (signal_pending(current)) current 1127 drivers/char/random.c if (signal_pending(current)) { current 1643 drivers/char/random.c if (signal_pending(current)) { current 2012 drivers/char/random.c if (signal_pending(current)) current 2035 drivers/char/random.c current->comm, nbytes); current 318 drivers/char/rtc.c DECLARE_WAITQUEUE(wait, current); current 357 drivers/char/rtc.c if (signal_pending(current)) { current 263 drivers/char/tpm/st33zp24/st33zp24.c if (ret == -ERESTARTSYS && freezing(current)) current 281 drivers/char/tpm/st33zp24/st33zp24.c } while (ret == -ERESTARTSYS && freezing(current)); current 87 drivers/char/tpm/tpm-dev-common.c task_tgid_nr(current)); current 77 drivers/char/tpm/tpm_tis_core.c if (rc == -ERESTARTSYS && freezing(current)) { current 177 drivers/char/tpm/tpm_tis_core.c if (rc == -ERESTARTSYS && freezing(current)) { current 217 drivers/char/tpm/tpm_tis_core.c if (rc == -ERESTARTSYS && freezing(current)) { current 85 drivers/char/tpm/xen-tpmfront.c if (rc == -ERESTARTSYS && freezing(current)) { current 136 drivers/clk/clk.c if (prepare_owner == current) { current 144 drivers/clk/clk.c prepare_owner = current; current 150 drivers/clk/clk.c WARN_ON_ONCE(prepare_owner != current); current 171 drivers/clk/clk.c if (enable_owner == current) { current 182 drivers/clk/clk.c enable_owner = current; current 190 drivers/clk/clk.c WARN_ON_ONCE(enable_owner != current); current 195 drivers/connector/cn_proc.c ev->event_data.ptrace.tracer_pid = current->pid; current 196 drivers/connector/cn_proc.c ev->event_data.ptrace.tracer_tgid = current->tgid; current 354 drivers/connector/cn_proc.c (task_active_pid_ns(current) != &init_pid_ns)) current 415 drivers/cpufreq/cpufreq.c && current == policy->transition_task); current 428 drivers/cpufreq/cpufreq.c policy->transition_task = current; current 299 drivers/cpufreq/pmac32-cpufreq.c switch_mmu_context(NULL, current->active_mm, NULL); current 662 drivers/crypto/amcc/crypto4xx_core.c static u32 get_next_gd(u32 current) current 664 drivers/crypto/amcc/crypto4xx_core.c if (current != PPC4XX_LAST_GD) current 665 drivers/crypto/amcc/crypto4xx_core.c return current + 1; current 670 drivers/crypto/amcc/crypto4xx_core.c static u32 get_next_sd(u32 current) current 672 drivers/crypto/amcc/crypto4xx_core.c if (current != PPC4XX_LAST_SD) current 673 drivers/crypto/amcc/crypto4xx_core.c return current + 1; current 363 drivers/crypto/chelsio/chtls/chtls_cm.c DECLARE_WAITQUEUE(wait, current); current 387 drivers/crypto/chelsio/chtls/chtls_cm.c if (signal_pending(current)) { current 945 drivers/crypto/chelsio/chtls/chtls_io.c if (signal_pending(current)) current 1425 drivers/crypto/chelsio/chtls/chtls_io.c if (signal_pending(current)) { current 1446 drivers/crypto/chelsio/chtls/chtls_io.c signal_pending(current)) current 1468 drivers/crypto/chelsio/chtls/chtls_io.c if (signal_pending(current)) { current 1581 drivers/crypto/chelsio/chtls/chtls_io.c if (signal_pending(current)) { current 1613 drivers/crypto/chelsio/chtls/chtls_io.c if (signal_pending(current)) { current 1629 drivers/crypto/chelsio/chtls/chtls_io.c current->comm, current->pid); current 1729 drivers/crypto/chelsio/chtls/chtls_io.c if (signal_pending(current)) { current 1752 drivers/crypto/chelsio/chtls/chtls_io.c signal_pending(current)) current 1771 drivers/crypto/chelsio/chtls/chtls_io.c if (signal_pending(current)) { current 58 drivers/crypto/nx/nx.c atomic_set(&(nx_ctx->stats->last_error_pid), current->pid); current 128 drivers/crypto/nx/nx.c current->pid); current 31 drivers/dax/device.c current->comm, func); current 39 drivers/dax/device.c current->comm, func, vma->vm_start, vma->vm_end, current 48 drivers/dax/device.c current->comm, func); current 55 drivers/dax/device.c current->comm, func); current 225 drivers/dax/device.c dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm, current 353 drivers/dax/device.c addr_align = current->mm->get_unmapped_area(filp, addr, len_align, current 360 drivers/dax/device.c return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); current 471 drivers/dma-buf/dma-fence.c if (intr && signal_pending(current)) { current 497 drivers/dma-buf/dma-fence.c cb.task = current; current 510 drivers/dma-buf/dma-fence.c if (ret > 0 && intr && signal_pending(current)) current 592 drivers/dma-buf/dma-fence.c cb[i].task = current; current 613 drivers/dma-buf/dma-fence.c if (ret > 0 && intr && signal_pending(current)) current 97 drivers/dma-buf/selftest.c if (signal_pending(current)) current 49 drivers/dma-buf/st-dma-fence.c struct wait_cb cb = { .task = current }; current 60 drivers/dma-buf/st-dma-fence.c if (signal_pending_state(state, current)) current 70 drivers/dma-buf/st-dma-fence.c if (signal_pending_state(state, current)) current 298 drivers/dma-buf/sw_sync.c get_task_comm(task_comm, current); current 365 drivers/dma/dmatest.c const char *thread_name = current->comm; current 411 drivers/dma/dmatest.c current->comm, error_count - MAX_ERROR_COUNT); current 448 drivers/dma/dmatest.c current->comm, n, err, src_off, dst_off, len, data); current 456 drivers/dma/dmatest.c current->comm, n, err, src_off, dst_off, len, data); current 646 drivers/dma/dmatest.c set_user_nice(current, 10); current 841 drivers/dma/dmatest.c pr_debug("%s: verifying source buffer...\n", current->comm); current 851 drivers/dma/dmatest.c pr_debug("%s: verifying dest buffer...\n", current->comm); current 899 drivers/dma/dmatest.c current->comm, total_tests, failed_tests, current 157 drivers/firmware/efi/arm-runtime.c efi_set_pgd(current->active_mm); current 285 drivers/firmware/psci/psci_checker.c if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority)) current 354 drivers/firmware/psci/psci_checker.c if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority)) current 50 drivers/fpga/dfl-afu-dma-region.c ret = account_locked_vm(current->mm, npages, true); current 79 drivers/fpga/dfl-afu-dma-region.c account_locked_vm(current->mm, npages, false); current 99 drivers/fpga/dfl-afu-dma-region.c account_locked_vm(current->mm, npages, false); current 186 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h if ((mmptr) == current->mm) { \ current 188 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h } else if (current->mm == NULL) { \ current 74 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c get_task_comm(fence->timeline_name, current); current 859 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c current->mm); current 866 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c info->pid = get_task_pid(current->group_leader, PIDTYPE_PID); current 1198 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c ret = init_user_pages(*mem, current->mm, user_addr); current 1330 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c down_write(¤t->mm->mmap_sem); current 1332 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c up_write(¤t->mm->mmap_sem); current 109 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c if (usermm != current->mm) { current 542 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c if (usermm && usermm != current->mm) current 133 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c if (mm && mm != current->mm) current 318 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c struct mm_struct *mm = current->mm; current 1346 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c gtt->usertask = current->group_leader; current 1499 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (amdkfd_fence_check_mm(f, current->mm)) current 3119 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c vm->task_info.pid = current->pid; current 3120 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c get_task_comm(vm->task_info.task_name, current); current 3122 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (current->group_leader->mm == current->mm) { current 3123 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c vm->task_info.tgid = current->group_leader->pid; current 3124 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c get_task_comm(vm->task_info.process_name, current->group_leader); current 119 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c current->pid); current 1806 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c process = kfd_get_process(current); current 1853 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c task_pid_nr(current), cmd, nr); current 1904 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c process = kfd_get_process(current); current 335 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (WARN(q->process->mm != current->mm, current 340 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->queue, &q->properties, current->mm); current 555 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (WARN(q->process->mm != current->mm, current 561 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c &q->properties, current->mm); current 721 drivers/gpu/drm/amd/amdkfd/kfd_events.c if (fatal_signal_pending(current)) { current 726 drivers/gpu/drm/amd/amdkfd/kfd_events.c if (signal_pending(current)) { current 133 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c kq->queue->process = kfd_get_process(current); current 270 drivers/gpu/drm/amd/amdkfd/kfd_process.c struct task_struct *thread = current; current 1459 drivers/gpu/drm/drm_bufs.c task_pid_nr(current)); current 161 drivers/gpu/drm/drm_dp_aux_dev.c if (signal_pending(current)) { current 208 drivers/gpu/drm/drm_dp_aux_dev.c if (signal_pending(current)) { current 127 drivers/gpu/drm/drm_file.c file->pid = get_pid(task_pid(current)); current 219 drivers/gpu/drm/drm_file.c task_pid_nr(current), current 310 drivers/gpu/drm/drm_file.c DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index); current 734 drivers/gpu/drm/drm_framebuffer.c strcpy(fb->comm, current->comm); current 989 drivers/gpu/drm/drm_ioc32.c task_pid_nr(current), current 200 drivers/gpu/drm/drm_ioctl.c client->pid = task_pid_vnr(current); current 340 drivers/gpu/drm/drm_ioctl.c if (current->comm[0] == 'X' && req->value == 1) { current 855 drivers/gpu/drm/drm_ioctl.c task_pid_nr(current), current 893 drivers/gpu/drm/drm_ioctl.c task_pid_nr(current), current 900 drivers/gpu/drm/drm_ioctl.c DRM_DEBUG("pid=%d, ret = %d\n", task_pid_nr(current), retcode); current 167 drivers/gpu/drm/drm_lock.c DECLARE_WAITQUEUE(entry, current); current 179 drivers/gpu/drm/drm_lock.c task_pid_nr(current), lock->context); current 184 drivers/gpu/drm/drm_lock.c lock->context, task_pid_nr(current), current 197 drivers/gpu/drm/drm_lock.c send_sig(SIGTERM, current, 0); current 211 drivers/gpu/drm/drm_lock.c if (signal_pending(current)) { current 267 drivers/gpu/drm/drm_lock.c task_pid_nr(current), lock->context); current 344 drivers/gpu/drm/drm_syncobj.c wait.task = current; current 359 drivers/gpu/drm/drm_syncobj.c if (signal_pending(current)) { current 908 drivers/gpu/drm/drm_syncobj.c entries[i].task = current; current 985 drivers/gpu/drm/drm_syncobj.c if (signal_pending(current)) { current 408 drivers/gpu/drm/drm_vm.c vma_entry->pid = current->pid; current 664 drivers/gpu/drm/etnaviv/etnaviv_gem.c might_lock_read(¤t->mm->mmap_sem); current 666 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (userptr->mm != current->mm) current 737 drivers/gpu/drm/etnaviv/etnaviv_gem.c etnaviv_obj->userptr.mm = current->mm; current 1327 drivers/gpu/drm/exynos/exynos_drm_g2d.c runqueue_node->pid = current->pid; current 1013 drivers/gpu/drm/i810/i810_dma.c task_pid_nr(current), retcode, d->granted); current 689 drivers/gpu/drm/i915/gem/i915_gem_context.c ctx->pid = get_task_pid(current, PIDTYPE_PID); current 691 drivers/gpu/drm/i915/gem/i915_gem_context.c current->comm, pid_nr(ctx->pid)); current 2097 drivers/gpu/drm/i915/gem/i915_gem_context.c current->comm, task_pid_nr(current)); current 1688 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (signal_pending(current)) { current 88 drivers/gpu/drm/i915/gem/i915_gem_mman.c struct mm_struct *mm = current->mm; current 360 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mm = __i915_mm_struct_find(dev_priv, current->mm); current 371 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mm->mm = current->mm; current 372 drivers/gpu/drm/i915/gem/i915_gem_userptr.c mmgrab(current->mm); current 571 drivers/gpu/drm/i915/gem/i915_gem_userptr.c work->task = current; current 617 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (mm == current->mm) { current 1769 drivers/gpu/drm/i915/gvt/kvmgt.c if (!kvm || kvm->mm != current->mm) { current 1968 drivers/gpu/drm/i915/gvt/kvmgt.c bool kthread = current->mm == NULL; current 1370 drivers/gpu/drm/i915/i915_request.c if (signal_pending_state(state, current)) current 1483 drivers/gpu/drm/i915/i915_request.c wait.tsk = current; current 1495 drivers/gpu/drm/i915/i915_request.c if (signal_pending_state(state, current)) { current 135 drivers/gpu/drm/i915/selftests/i915_selftest.c if (signal_pending(current)) current 143 drivers/gpu/drm/i915/selftests/i915_selftest.c if (err == -EINTR && !signal_pending(current)) current 308 drivers/gpu/drm/i915/selftests/i915_selftest.c if (signal_pending(current)) current 339 drivers/gpu/drm/i915/selftests/i915_selftest.c if (!signal_pending(current)) { current 1123 drivers/gpu/drm/mga/mga_dma.c task_pid_nr(current), d->send_count); current 1131 drivers/gpu/drm/mga/mga_dma.c task_pid_nr(current), d->request_count, current 188 drivers/gpu/drm/mga/mga_ioc32.c task_pid_nr(current), current 1017 drivers/gpu/drm/mga/mga_state.c DRM_DEBUG("pid=%d\n", task_pid_nr(current)); current 1049 drivers/gpu/drm/mga/mga_state.c DRM_DEBUG("pid=%d\n", task_pid_nr(current)); current 1078 drivers/gpu/drm/mga/mga_state.c DRM_DEBUG("pid=%d\n", task_pid_nr(current)); current 132 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->pid = current->tgid; current 994 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h #define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0) current 995 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h #define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1) current 999 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h trace_dpu_trace_counter(current->tgid, name, value) current 410 drivers/gpu/drm/msm/msm_gem_submit.c struct pid *pid = get_pid(task_pid(current)); current 1058 drivers/gpu/drm/nouveau/nouveau_drm.c get_task_comm(tmpname, current); current 281 drivers/gpu/drm/nouveau/nouveau_fence.c if (intr && signal_pending(current)) current 305 drivers/gpu/drm/nouveau/nouveau_fence.c if (intr && signal_pending(current)) { current 173 drivers/gpu/drm/nouveau/nouveau_svm.c mm = get_task_mm(current); current 347 drivers/gpu/drm/nouveau/nouveau_svm.c svmm->mm = get_task_mm(current); current 1068 drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c if (signal_pending(current)) { current 927 drivers/gpu/drm/r128/r128_cce.c task_pid_nr(current), d->send_count); current 935 drivers/gpu/drm/r128/r128_cce.c task_pid_nr(current), d->request_count, dma->buf_count); current 834 drivers/gpu/drm/r128/r128_state.c task_pid_nr(current), buf->file_priv); current 1327 drivers/gpu/drm/r128/r128_state.c task_pid_nr(current), vertex->idx, vertex->count, vertex->discard); current 1348 drivers/gpu/drm/r128/r128_state.c task_pid_nr(current), buf->file_priv); current 1379 drivers/gpu/drm/r128/r128_state.c DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", task_pid_nr(current), current 1401 drivers/gpu/drm/r128/r128_state.c task_pid_nr(current), buf->file_priv); current 1442 drivers/gpu/drm/r128/r128_state.c DRM_DEBUG("pid=%d index=%d\n", task_pid_nr(current), blit->idx); current 1542 drivers/gpu/drm/r128/r128_state.c task_pid_nr(current), buf->file_priv); current 1589 drivers/gpu/drm/r128/r128_state.c DRM_DEBUG("pid=%d\n", task_pid_nr(current)); current 199 drivers/gpu/drm/radeon/radeon_cs.c down_read(¤t->mm->mmap_sem); current 204 drivers/gpu/drm/radeon/radeon_cs.c up_read(¤t->mm->mmap_sem); current 1086 drivers/gpu/drm/radeon/radeon_fence.c cb.task = current; current 1111 drivers/gpu/drm/radeon/radeon_fence.c if (t > 0 && intr && signal_pending(current)) current 87 drivers/gpu/drm/radeon/radeon_gem.c robj->pid = task_pid_nr(current); current 344 drivers/gpu/drm/radeon/radeon_gem.c down_read(¤t->mm->mmap_sem); current 347 drivers/gpu/drm/radeon/radeon_gem.c up_read(¤t->mm->mmap_sem); current 354 drivers/gpu/drm/radeon/radeon_gem.c up_read(¤t->mm->mmap_sem); current 184 drivers/gpu/drm/radeon/radeon_mn.c mn = mmu_notifier_get(&radeon_mn_ops, current->mm); current 496 drivers/gpu/drm/radeon/radeon_ttm.c if (current->mm != gtt->usermm) current 748 drivers/gpu/drm/radeon/radeon_ttm.c gtt->usermm = current->mm; current 1026 drivers/gpu/drm/savage/savage_bci.c task_pid_nr(current), d->send_count); current 1034 drivers/gpu/drm/savage/savage_bci.c task_pid_nr(current), d->request_count, dma->buf_count); current 183 drivers/gpu/drm/scheduler/sched_entity.c if (current->flags & PF_EXITING) { current 195 drivers/gpu/drm/scheduler/sched_entity.c last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); current 196 drivers/gpu/drm/scheduler/sched_entity.c if ((!last_user || last_user == current->group_leader) && current 197 drivers/gpu/drm/scheduler/sched_entity.c (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) { current 525 drivers/gpu/drm/scheduler/sched_entity.c WRITE_ONCE(entity->last_user, current->group_leader); current 704 drivers/gpu/drm/scheduler/sched_main.c sched_setscheduler(current, SCHED_FIFO, &sparam); current 723 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c pool->dev_name, pool->name, current->pid, count); current 1126 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c p->pool->dev_name, p->pool->name, current->pid, current 1206 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c current->pid); current 403 drivers/gpu/drm/vc4/vc4_gem.c if (interruptible && signal_pending(current)) { current 164 drivers/gpu/drm/via/via_drv.h DECLARE_WAITQUEUE(entry, current); \ current 177 drivers/gpu/drm/via/via_drv.h if (signal_pending(current)) { \ current 268 drivers/gpu/drm/virtio/virtgpu_kms.c get_task_comm(dbgname, current); current 191 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c if (intr && signal_pending(current)) { current 197 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c cb.task = current; current 219 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c if (intr && signal_pending(current)) { current 241 drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c if (interruptible && signal_pending(current)) { current 219 drivers/gpu/drm/vmwgfx/vmwgfx_irq.c if (interruptible && signal_pending(current)) { current 285 drivers/gpu/host1x/syncpt.c current->comm, sp->id, sp->name, current 499 drivers/gpu/vga/vgaarb.c init_waitqueue_entry(&wait, current); current 504 drivers/gpu/vga/vgaarb.c if (interruptible && signal_pending(current)) { current 1367 drivers/hid/hid-core.c __func__, n, current->comm); current 1413 drivers/hid/hid-core.c __func__, n, current->comm); current 1421 drivers/hid/hid-core.c __func__, value, n, current->comm); current 1096 drivers/hid/hid-debug.c DECLARE_WAITQUEUE(wait, current); current 1109 drivers/hid/hid-debug.c if (signal_pending(current)) { current 80 drivers/hid/hid-roccat.c DECLARE_WAITQUEUE(wait, current); current 95 drivers/hid/hid-roccat.c if (signal_pending(current)) { current 43 drivers/hid/hidraw.c DECLARE_WAITQUEUE(wait, current); current 53 drivers/hid/hidraw.c if (signal_pending(current)) { current 121 drivers/hid/hidraw.c task_pid_nr(current)); current 128 drivers/hid/hidraw.c task_pid_nr(current)); current 201 drivers/hid/hidraw.c task_pid_nr(current)); current 208 drivers/hid/hidraw.c task_pid_nr(current)); current 731 drivers/hid/uhid.c task_tgid_vnr(current), current->comm); current 342 drivers/hid/usbhid/hiddev.c if (signal_pending(current)) { current 849 drivers/hsi/clients/cmt_speech.c if (signal_pending(current)) { current 1159 drivers/hsi/clients/cmt_speech.c } else if (signal_pending(current)) { current 1033 drivers/hwtracing/coresight/coresight-etm3x-sysfs.c if (task_active_pid_ns(current) != &init_pid_ns) current 1061 drivers/hwtracing/coresight/coresight-etm3x-sysfs.c if (task_active_pid_ns(current) != &init_pid_ns) current 1087 drivers/hwtracing/coresight/coresight-etm3x-sysfs.c if (task_active_pid_ns(current) != &init_pid_ns) current 1107 drivers/hwtracing/coresight/coresight-etm3x-sysfs.c if (task_active_pid_ns(current) != &init_pid_ns) current 1643 drivers/hwtracing/coresight/coresight-etm4x-sysfs.c if (task_active_pid_ns(current) != &init_pid_ns) current 1671 drivers/hwtracing/coresight/coresight-etm4x-sysfs.c if (task_active_pid_ns(current) != &init_pid_ns) current 1704 drivers/hwtracing/coresight/coresight-etm4x-sysfs.c if (task_active_pid_ns(current) != &init_pid_ns) current 1727 drivers/hwtracing/coresight/coresight-etm4x-sysfs.c if (task_active_pid_ns(current) != &init_pid_ns) current 634 drivers/hwtracing/stm/core.c char comm[sizeof(current->comm)]; current 637 drivers/hwtracing/stm/core.c get_task_comm(comm, current); current 435 drivers/i2c/busses/i2c-ibm_iic.c if (signal_pending(current)){ current 577 drivers/i2c/busses/i2c-mpc.c if (signal_pending(current)) { current 140 drivers/iio/industrialio-buffer.c if (signal_pending(current)) { current 1673 drivers/infiniband/core/device.c ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net); current 189 drivers/infiniband/core/restrack.c get_task_struct(current); current 190 drivers/infiniband/core/restrack.c res->task = current; current 359 drivers/infiniband/core/restrack.c return task_active_pid_ns(current) == &init_pid_ns; current 503 drivers/infiniband/core/ucma.c cm_id = __rdma_create_id(current->nsproxy->net_ns, current 1709 drivers/infiniband/core/ucma.c task_tgid_vnr(current), current->comm); current 241 drivers/infiniband/core/umem.c umem->owning_mm = mm = current->mm; current 181 drivers/infiniband/core/umem_odp.c WARN_ON(mm != current->mm); current 183 drivers/infiniband/core/umem_odp.c per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID); current 317 drivers/infiniband/core/umem_odp.c umem->owning_mm = current->mm; current 413 drivers/infiniband/core/umem_odp.c umem_odp->umem.owning_mm = mm = current->mm; current 747 drivers/infiniband/core/user_mad.c current->comm); current 988 drivers/infiniband/core/user_mad.c if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) { current 1086 drivers/infiniband/core/user_mad.c if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) { current 673 drivers/infiniband/core/uverbs_main.c task_tgid_vnr(current), current->comm); current 1060 drivers/infiniband/core/uverbs_main.c if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) { current 1043 drivers/infiniband/hw/hfi1/affinity.c *proc_mask = current->cpus_ptr; current 1051 drivers/infiniband/hw/hfi1/affinity.c if (current->nr_cpus_allowed == 1) { current 1053 drivers/infiniband/hw/hfi1/affinity.c current->pid, current->comm, current 1062 drivers/infiniband/hw/hfi1/affinity.c } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) { current 1064 drivers/infiniband/hw/hfi1/affinity.c current->pid, current->comm, current 209 drivers/infiniband/hw/hfi1/file_ops.c fd->mm = current->mm; current 979 drivers/infiniband/hw/hfi1/file_ops.c uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num, current 1011 drivers/infiniband/hw/hfi1/file_ops.c strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm)); current 879 drivers/infiniband/hw/hfi1/sdma.c if (current->nr_cpus_allowed != 1) current 383 drivers/infiniband/hw/mlx4/mr.c down_read(¤t->mm->mmap_sem); current 389 drivers/infiniband/hw/mlx4/mr.c vma = find_vma(current->mm, untagged_start); current 398 drivers/infiniband/hw/mlx4/mr.c up_read(¤t->mm->mmap_sem); current 57 drivers/infiniband/hw/mlx5/mlx5_ib.h __LINE__, current->pid, ##arg) current 61 drivers/infiniband/hw/mlx5/mlx5_ib.h __LINE__, current->pid, ##arg) current 65 drivers/infiniband/hw/mlx5/mlx5_ib.h __LINE__, current->pid, ##arg) current 871 drivers/infiniband/hw/mthca/mthca_provider.c current->comm); current 93 drivers/infiniband/hw/qib/qib_diag.c dc->pid = current->pid; current 766 drivers/infiniband/hw/qib/qib_diag.c if (dc->pid != current->pid) { current 840 drivers/infiniband/hw/qib/qib_diag.c if (dc->pid != current->pid) { current 1145 drivers/infiniband/hw/qib/qib_file_ops.c const unsigned int weight = current->nr_cpus_allowed; current 1178 drivers/infiniband/hw/qib/qib_file_ops.c current->pid); current 1322 drivers/infiniband/hw/qib/qib_file_ops.c rcd->pid = current->pid; current 1324 drivers/infiniband/hw/qib/qib_file_ops.c strlcpy(rcd->comm, current->comm, sizeof(rcd->comm)); current 1507 drivers/infiniband/hw/qib/qib_file_ops.c rcd->subpid[subctxt_fp(fp)] = current->pid; current 1626 drivers/infiniband/hw/qib/qib_file_ops.c const unsigned int cpu = cpumask_first(current->cpus_ptr); current 1627 drivers/infiniband/hw/qib/qib_file_ops.c const unsigned int weight = current->nr_cpus_allowed; current 2046 drivers/infiniband/hw/qib/qib_file_ops.c task_tgid_vnr(current), current->comm); current 102 drivers/infiniband/hw/qib/qib_user_pages.c locked = atomic64_add_return(num_pages, ¤t->mm->pinned_vm); current 109 drivers/infiniband/hw/qib/qib_user_pages.c down_read(¤t->mm->mmap_sem); current 116 drivers/infiniband/hw/qib/qib_user_pages.c up_read(¤t->mm->mmap_sem); current 120 drivers/infiniband/hw/qib/qib_user_pages.c up_read(¤t->mm->mmap_sem); current 126 drivers/infiniband/hw/qib/qib_user_pages.c atomic64_sub(num_pages, ¤t->mm->pinned_vm); current 135 drivers/infiniband/hw/qib/qib_user_pages.c if (current->mm) current 136 drivers/infiniband/hw/qib/qib_user_pages.c atomic64_sub(num_pages, ¤t->mm->pinned_vm); current 224 drivers/infiniband/hw/qib/qib_user_sdma.c current->pid); current 234 drivers/infiniband/hw/qib/qib_user_sdma.c sdma_rb_node->pid = current->pid; current 710 drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c qp_grp->owner_pid = current->pid; current 125 drivers/infiniband/hw/usnic/usnic_uiom.c uiomr->owning_mm = mm = current->mm; current 128 drivers/infiniband/hw/usnic/usnic_uiom.c locked = atomic64_add_return(npages, ¤t->mm->pinned_vm); current 186 drivers/infiniband/hw/usnic/usnic_uiom.c atomic64_sub(npages, ¤t->mm->pinned_vm); current 1259 drivers/infiniband/sw/rdmavt/qp.c qp->pid = current->pid; current 229 drivers/infiniband/sw/siw/siw_cm.c if (signal_pending(current)) current 230 drivers/infiniband/sw/siw/siw_cm.c flush_signals(current); current 391 drivers/infiniband/sw/siw/siw_mem.c mm_s = current->mm; current 1324 drivers/infiniband/sw/siw/siw_verbs.c if (num_pages > mem_limit - current->mm->locked_vm) { current 1327 drivers/infiniband/sw/siw/siw_verbs.c current->mm->locked_vm); current 2428 drivers/infiniband/ulp/ipoib/ipoib_main.c current->comm); current 619 drivers/infiniband/ulp/isert/ib_isert.c isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm, current 620 drivers/infiniband/ulp/isert/ib_isert.c current->pid); current 234 drivers/iommu/intel-svm.c mm = get_task_mm(current); current 94 drivers/irqchip/irq-gic-v4.c task_pid_nr(current)); current 999 drivers/isdn/capi/kcapi.c if (signal_pending(current)) { current 706 drivers/isdn/mISDN/l1oip_core.c while (!signal_pending(current)) { current 202 drivers/isdn/mISDN/stack.c sigfillset(¤t->blocked); current 114 drivers/isdn/mISDN/timerdev.c if (signal_pending(current)) current 720 drivers/macintosh/adb.c DECLARE_WAITQUEUE(wait, current); current 746 drivers/macintosh/adb.c if (signal_pending(current)) { current 56 drivers/macintosh/ams/ams-core.c static DEVICE_ATTR(current, S_IRUGO, ams_show_current, NULL); current 1175 drivers/macintosh/smu.c DECLARE_WAITQUEUE(wait, current); current 1197 drivers/macintosh/smu.c if (signal_pending(current)) current 1291 drivers/macintosh/smu.c DECLARE_WAITQUEUE(wait, current); current 1934 drivers/macintosh/via-pmu.c switch_mmu_context(NULL, current->active_mm, NULL); current 2023 drivers/macintosh/via-pmu.c switch_mmu_context(NULL, current->active_mm, NULL); current 2185 drivers/macintosh/via-pmu.c DECLARE_WAITQUEUE(wait, current); current 2219 drivers/macintosh/via-pmu.c if (signal_pending(current)) current 171 drivers/mailbox/mailbox-test.c DECLARE_WAITQUEUE(wait, current); current 197 drivers/mailbox/mailbox-test.c if (signal_pending(current)) { current 467 drivers/md/bcache/btree.c BUG_ON(current->bio_list); current 561 drivers/md/bcache/btree.c !current->bio_list) current 891 drivers/md/bcache/btree.c old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); current 892 drivers/md/bcache/btree.c if (old && old != current) { current 932 drivers/md/bcache/btree.c if (c->btree_cache_alloc_lock == current) { current 943 drivers/md/bcache/btree.c BUG_ON(current->bio_list); current 1030 drivers/md/bcache/btree.c if (current->bio_list) current 2249 drivers/md/bcache/btree.c if (current->bio_list) { current 2331 drivers/md/bcache/btree.c BUG_ON(current->bio_list); current 120 drivers/md/bcache/closure.c struct closure_syncer s = { .task = current }; current 380 drivers/md/bcache/request.c struct task_struct *task = current; current 750 drivers/md/bcache/request.c s->iop.write_point = hash_long((unsigned long) current, 16); current 163 drivers/md/dm-bufio.c #define dm_bufio_in_request() (!!current->bio_list) current 802 drivers/md/dm-bufio.c DECLARE_WAITQUEUE(wait, current); current 1166 drivers/md/dm-integrity.c new_range->task = current; current 1356 drivers/md/dm-integrity.c DECLARE_WAITQUEUE(wait, current); current 1546 drivers/md/dm-snap.c DECLARE_WAITQUEUE(wait, current); current 518 drivers/md/dm.c if (r == -ENOTCONN && !fatal_signal_pending(current)) { current 551 drivers/md/dm.c current->comm, cmd); current 1639 drivers/md/dm.c if (current->bio_list && ci.sector_count && !error) { current 1762 drivers/md/dm.c if (current->bio_list) { current 2444 drivers/md/dm.c if (signal_pending_state(task_state, current)) { current 423 drivers/md/md.c WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk); current 7697 drivers/md/md.c if (signal_pending(current)) current 7698 drivers/md/md.c flush_signals(current); current 8512 drivers/md/md.c if (signal_pending(current)) current 8513 drivers/md/md.c flush_signals(current); current 8644 drivers/md/md.c flush_signals(current); /* just in case */ current 8984 drivers/md/md.c if (signal_pending(current)) { current 8990 drivers/md/md.c flush_signals(current); current 103 drivers/md/persistent-data/dm-block-manager.c if (lock->holders[i] == current) { current 205 drivers/md/persistent-data/dm-block-manager.c __add_holder(lock, current); current 210 drivers/md/persistent-data/dm-block-manager.c get_task_struct(current); current 212 drivers/md/persistent-data/dm-block-manager.c w.task = current; current 218 drivers/md/persistent-data/dm-block-manager.c put_task_struct(current); current 233 drivers/md/persistent-data/dm-block-manager.c __add_holder(lock, current); current 247 drivers/md/persistent-data/dm-block-manager.c __del_holder(lock, current); current 268 drivers/md/persistent-data/dm-block-manager.c __add_holder(lock, current); current 273 drivers/md/persistent-data/dm-block-manager.c get_task_struct(current); current 274 drivers/md/persistent-data/dm-block-manager.c w.task = current; current 285 drivers/md/persistent-data/dm-block-manager.c put_task_struct(current); current 293 drivers/md/persistent-data/dm-block-manager.c __del_holder(lock, current); current 1173 drivers/md/raid1.c if (from_schedule || current->bio_list) { current 997 drivers/md/raid10.c current->bio_list && current 998 drivers/md/raid10.c (!bio_list_empty(¤t->bio_list[0]) || current 999 drivers/md/raid10.c !bio_list_empty(¤t->bio_list[1]))), current 1077 drivers/md/raid10.c if (from_schedule || current->bio_list) { current 16 drivers/media/common/saa7146/saa7146_vbi.c DECLARE_WAITQUEUE(wait, current); current 118 drivers/media/common/saa7146/saa7146_vbi.c if(signal_pending(current)) { current 1892 drivers/media/dvb-core/dvb_ca_en50221.c if (signal_pending(current)) { current 1167 drivers/media/dvb-core/dvb_demux.c if (signal_pending(current)) current 673 drivers/media/dvb-core/dvb_frontend.c freezing(current), current 863 drivers/media/dvb-core/dvb_frontend.c if (signal_pending(current)) current 2755 drivers/media/dvb-core/dvb_frontend.c if (signal_pending(current)) current 314 drivers/media/i2c/msp3400-driver.c DECLARE_WAITQUEUE(wait, current); current 277 drivers/media/pci/cx18/cx18-driver.c sig = intr ? signal_pending(current) : 0; current 235 drivers/media/pci/cx18/cx18-fileops.c if (signal_pending(current)) { current 357 drivers/media/pci/ivtv/ivtv-driver.c return signal_pending(current) ? -EINTR : 0; current 369 drivers/media/pci/ivtv/ivtv-driver.c int ret = signal_pending(current); current 268 drivers/media/pci/ivtv/ivtv-fileops.c if (signal_pending(current)) { current 628 drivers/media/pci/ivtv/ivtv-fileops.c if (signal_pending(current)) { current 684 drivers/media/pci/ivtv/ivtv-fileops.c while (!(got_sig = signal_pending(current)) && current 173 drivers/media/pci/ivtv/ivtv-ioctl.c got_sig = signal_pending(current); current 1801 drivers/media/pci/ivtv/ivtv-ioctl.c if (signal_pending(current)) { current 794 drivers/media/pci/ivtv/ivtv-streams.c DECLARE_WAITQUEUE(wait, current); current 1085 drivers/media/pci/ivtv/ivtv-yuv.c got_sig = signal_pending(current); current 302 drivers/media/pci/ivtv/ivtvfb.c got_sig = signal_pending(current); current 1116 drivers/media/pci/pt1/pt1.c if (signal_pending(current)) current 499 drivers/media/pci/pt3/pt3.c if (signal_pending(current)) current 246 drivers/media/pci/solo6x10/solo6x10-i2c.c if (signal_pending(current)) current 636 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c DECLARE_WAITQUEUE(wait, current); current 262 drivers/media/pci/solo6x10/solo6x10-v4l2.c DECLARE_WAITQUEUE(wait, current); current 619 drivers/media/platform/exynos-gsc/gsc-m2m.c pr_debug("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state); current 682 drivers/media/platform/exynos-gsc/gsc-m2m.c task_pid_nr(current), gsc->state, gsc->m2m.refcnt); current 472 drivers/media/platform/exynos4-is/fimc-capture.c dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state); current 532 drivers/media/platform/exynos4-is/fimc-capture.c dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state); current 612 drivers/media/platform/exynos4-is/fimc-m2m.c pr_debug("pid: %d, state: %#lx\n", task_pid_nr(current), fimc->state); current 686 drivers/media/platform/exynos4-is/fimc-m2m.c task_pid_nr(current), fimc->state, fimc->m2m.refcnt); current 541 drivers/media/platform/s3c-camif/camif-capture.c vp->state, vp->owner, task_pid_nr(current)); current 573 drivers/media/platform/s3c-camif/camif-capture.c vp->state, vp->owner, task_pid_nr(current)); current 80 drivers/media/platform/vivid/vivid-radio-rx.c if (msleep_interruptible(20) && signal_pending(current)) current 65 drivers/media/platform/vivid/vivid-radio-tx.c if (msleep_interruptible(20) && signal_pending(current)) current 2340 drivers/media/usb/cpia2/cpia2_core.c if (signal_pending(current)) current 194 drivers/media/usb/cpia2/cpia2_v4l.c if (signal_pending(current)) current 897 drivers/media/usb/cpia2/cpia2_v4l.c if (signal_pending(current)) current 742 drivers/media/usb/gspca/cpia1.c if (signal_pending(current)) current 537 drivers/media/v4l2-core/videobuf-core.c down_read(¤t->mm->mmap_sem); current 624 drivers/media/v4l2-core/videobuf-core.c up_read(¤t->mm->mmap_sem); current 161 drivers/media/v4l2-core/videobuf-dma-contig.c struct mm_struct *mm = current->mm; current 204 drivers/media/v4l2-core/videobuf-dma-contig.c up_read(¤t->mm->mmap_sem); current 203 drivers/media/v4l2-core/videobuf-dma-sg.c down_read(¤t->mm->mmap_sem); current 205 drivers/media/v4l2-core/videobuf-dma-sg.c up_read(¤t->mm->mmap_sem); current 207 drivers/mfd/ucb1x00-ts.c DECLARE_WAITQUEUE(wait, current); current 290 drivers/misc/cxl/api.c ctx->mm = get_task_mm(current); current 476 drivers/misc/cxl/api.c rc = cxl_start_context(ctx, work->work_element_descriptor, current); current 216 drivers/misc/cxl/file.c ctx->pid = get_task_pid(current, PIDTYPE_PID); current 219 drivers/misc/cxl/file.c ctx->mm = get_task_mm(current); current 457 drivers/misc/cxl/file.c if (signal_pending(current)) { current 538 drivers/misc/cxl/guest.c pid = current->pid; current 541 drivers/misc/cxl/guest.c if (!test_tsk_thread_flag(current, TIF_32BIT)) current 615 drivers/misc/cxl/native.c if (!test_tsk_thread_flag(current, TIF_32BIT)) current 686 drivers/misc/cxl/native.c rc = set_thread_tidr(current); current 689 drivers/misc/cxl/native.c ctx->tidr = current->thread.tidr; current 748 drivers/misc/cxl/native.c pid = current->pid; current 938 drivers/misc/cxl/native.c pid = (u64)current->pid << 32; current 451 drivers/misc/fastrpc.c ctx->pid = current->pid; current 791 drivers/misc/fastrpc.c vma = find_vma(current->mm, ctx->args[i].ptr); current 892 drivers/misc/fastrpc.c msg->tid = current->pid; current 1004 drivers/misc/fastrpc.c inbuf.namelen = strlen(current->comm) + 1; current 1029 drivers/misc/fastrpc.c args[1].ptr = (u64)(uintptr_t)current->comm; current 1184 drivers/misc/fastrpc.c fl->tgid = current->tgid; current 47 drivers/misc/genwqe/card_dev.c cfile->opener = get_pid(task_tgid(current)); current 112 drivers/misc/habanalabs/habanalabs_drv.c hpriv->taskpid = find_get_pid(current->pid); current 212 drivers/misc/habanalabs/habanalabs_drv.c hpriv->taskpid = find_get_pid(current->pid); current 416 drivers/misc/habanalabs/habanalabs_ioctl.c task_pid_nr(current), cmd, nr); current 435 drivers/misc/habanalabs/habanalabs_ioctl.c task_pid_nr(current), nr); current 453 drivers/misc/habanalabs/habanalabs_ioctl.c task_pid_nr(current), nr); current 71 drivers/misc/ibmasm/r_heartbeat.c if (signal_pending(current) || rhb->stopped) { current 967 drivers/misc/ibmvmc.c if (signal_pending(current)) { current 1812 drivers/misc/ibmvmc.c set_user_nice(current, -20); current 595 drivers/misc/lis3lv02d/lis3lv02d.c DECLARE_WAITQUEUE(wait, current); current 615 drivers/misc/lis3lv02d/lis3lv02d.c if (signal_pending(current)) { current 250 drivers/misc/lkdtm/bugs.c const unsigned char *stack = task_stack_page(current); current 264 drivers/misc/lkdtm/bugs.c const unsigned char *stack = task_stack_page(current); current 72 drivers/misc/lkdtm/perms.c copied = access_process_vm(current, (unsigned long)dst, do_nothing, current 65 drivers/misc/lkdtm/usercopy.c bad_stack = task_stack_page(current) + THREAD_SIZE; current 74 drivers/misc/mei/bus.c if (signal_pending(current)) current 161 drivers/misc/mei/bus.c if (signal_pending(current)) current 170 drivers/misc/mei/bus.c if (signal_pending(current)) current 1778 drivers/misc/mei/client.c if (signal_pending(current)) current 183 drivers/misc/mei/main.c if (signal_pending(current)) current 302 drivers/misc/mei/main.c if (signal_pending(current)) current 666 drivers/misc/mei/main.c if (signal_pending(current)) current 769 drivers/misc/mic/scif/scif_api.c lep->files = current->files; current 1404 drivers/misc/mic/scif/scif_api.c if (signal_pending(current)) current 266 drivers/misc/mic/scif/scif_dma.c scif_init_mmu_notifier(mmn, current->mm, ep); current 267 drivers/misc/mic/scif/scif_dma.c if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) { current 1691 drivers/misc/mic/scif/scif_dma.c mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info); current 1693 drivers/misc/mic/scif/scif_dma.c mmn = scif_add_mmu_notifier(current->mm, ep); current 254 drivers/misc/mic/scif/scif_rma.c return get_task_mm(current); current 1369 drivers/misc/mic/scif/scif_rma.c mm = current->mm; current 89 drivers/misc/ocxl/file.c rc = ocxl_context_attach(ctx, amr, current->mm); current 128 drivers/misc/ocxl/file.c if (set_thread_tidr(current)) { current 133 drivers/misc/ocxl/file.c ctx->tidr = current->thread.tidr; current 429 drivers/misc/ocxl/file.c if (signal_pending(current)) { current 489 drivers/misc/ocxl/link.c if (!test_tsk_thread_flag(current, TIF_32BIT)) current 568 drivers/misc/ocxl/link.c trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr); current 650 drivers/misc/ocxl/link.c trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid, current 173 drivers/misc/pti.c get_task_comm(comm, current); current 52 drivers/misc/sgi-gru/grufault.c vma = find_vma(current->mm, vaddr); current 68 drivers/misc/sgi-gru/grufault.c struct mm_struct *mm = current->mm; current 85 drivers/misc/sgi-gru/grufault.c struct mm_struct *mm = current->mm; current 112 drivers/misc/sgi-gru/grufault.c up_read(¤t->mm->mmap_sem); current 882 drivers/misc/sgi-gru/grufault.c gts->ts_tgid_owner = current->tgid; current 138 drivers/misc/sgi-gru/grufile.c down_write(¤t->mm->mmap_sem); current 149 drivers/misc/sgi-gru/grufile.c up_write(¤t->mm->mmap_sem); current 340 drivers/misc/sgi-gru/grumain.c gts->ts_mm = current->mm; current 729 drivers/misc/sgi-gru/grumain.c if (!gru || gts->ts_tgid_owner != current->tgid) current 269 drivers/misc/sgi-gru/grutlbpurge.c mn = mmu_notifier_get_locked(&gru_mmuops, current->mm); current 262 drivers/misc/sgi-xp/xpc_main.c set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU)); current 86 drivers/misc/vexpress-syscfg.c if (signal_pending(current)) current 799 drivers/mmc/core/core.c struct task_struct *task = ctx ? NULL : current; current 800 drivers/mmc/core/core.c DECLARE_WAITQUEUE(wait, current); current 146 drivers/mmc/core/sdio_irq.c sched_setscheduler(current, SCHED_FIFO, ¶m); current 168 drivers/mmc/core/sdio_uart.c if (likely(port->in_sdio_uart_irq != current)) current 176 drivers/mmc/core/sdio_uart.c if (likely(port->in_sdio_uart_irq != current)) current 525 drivers/mmc/core/sdio_uart.c if (unlikely(port->in_sdio_uart_irq == current)) current 532 drivers/mmc/core/sdio_uart.c port->in_sdio_uart_irq = current; current 947 drivers/mmc/host/tmio_mmc_core.c current->comm, task_pid_nr(current), current 953 drivers/mmc/host/tmio_mmc_core.c current->comm, task_pid_nr(current), current 988 drivers/mmc/host/tmio_mmc_core.c current->comm, task_pid_nr(current), current 808 drivers/mtd/chips/cfi_cmdset_0001.c DECLARE_WAITQUEUE(wait, current); current 926 drivers/mtd/chips/cfi_cmdset_0001.c DECLARE_WAITQUEUE(wait, current); current 1216 drivers/mtd/chips/cfi_cmdset_0001.c DECLARE_WAITQUEUE(wait, current); current 1292 drivers/mtd/chips/cfi_cmdset_0001.c DECLARE_WAITQUEUE(wait, current); current 881 drivers/mtd/chips/cfi_cmdset_0002.c DECLARE_WAITQUEUE(wait, current); current 1123 drivers/mtd/chips/cfi_cmdset_0002.c DECLARE_WAITQUEUE(wait, current); current 1317 drivers/mtd/chips/cfi_cmdset_0002.c DECLARE_WAITQUEUE(wait, current); current 1682 drivers/mtd/chips/cfi_cmdset_0002.c DECLARE_WAITQUEUE(wait, current); current 1828 drivers/mtd/chips/cfi_cmdset_0002.c DECLARE_WAITQUEUE(wait, current); current 1962 drivers/mtd/chips/cfi_cmdset_0002.c DECLARE_WAITQUEUE(wait, current); current 2421 drivers/mtd/chips/cfi_cmdset_0002.c DECLARE_WAITQUEUE(wait, current); current 2520 drivers/mtd/chips/cfi_cmdset_0002.c DECLARE_WAITQUEUE(wait, current); current 2920 drivers/mtd/chips/cfi_cmdset_0002.c DECLARE_WAITQUEUE(wait, current); current 252 drivers/mtd/chips/cfi_cmdset_0020.c DECLARE_WAITQUEUE(wait, current); current 427 drivers/mtd/chips/cfi_cmdset_0020.c DECLARE_WAITQUEUE(wait, current); current 740 drivers/mtd/chips/cfi_cmdset_0020.c DECLARE_WAITQUEUE(wait, current); current 982 drivers/mtd/chips/cfi_cmdset_0020.c DECLARE_WAITQUEUE(wait, current); current 1039 drivers/mtd/chips/cfi_cmdset_0020.c DECLARE_WAITQUEUE(wait, current); current 1186 drivers/mtd/chips/cfi_cmdset_0020.c DECLARE_WAITQUEUE(wait, current); current 143 drivers/mtd/lpddr/lpddr_cmds.c DECLARE_WAITQUEUE(wait, current); current 173 drivers/mtd/lpddr/lpddr_cmds.c DECLARE_WAITQUEUE(wait, current); current 270 drivers/mtd/lpddr/lpddr_cmds.c DECLARE_WAITQUEUE(wait, current); current 997 drivers/mtd/nand/onenand/onenand_base.c DECLARE_WAITQUEUE(wait, current); current 8 drivers/mtd/tests/mtd_test.h if (signal_pending(current)) { current 20 drivers/mtd/ubi/debug.h __func__, __LINE__, current->pid); \ current 29 drivers/mtd/ubi/debug.h pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid, \ current 32 drivers/mtd/ubi/gluebi.c current->pid, __func__, ##__VA_ARGS__) current 1622 drivers/mtd/ubi/wl.c ubi->bgt_name, task_pid_nr(current)); current 87 drivers/net/can/softing/softing_fw.c } while (!signal_pending(current)); current 330 drivers/net/can/softing/softing_fw.c if (signal_pending(current)) current 12058 drivers/net/ethernet/broadcom/tg3.c if (signal_pending(current)) { current 22 drivers/net/ethernet/intel/igbvf/ethtool.c #define IGBVF_STAT(current, base) \ current 23 drivers/net/ethernet/intel/igbvf/ethtool.c sizeof(((struct igbvf_adapter *)0)->current), \ current 24 drivers/net/ethernet/intel/igbvf/ethtool.c offsetof(struct igbvf_adapter, current), \ current 132 drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->pid = current->pid; current 67 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h __func__, __LINE__, current->pid, ##__VA_ARGS__) current 71 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h __func__, __LINE__, current->pid, ##__VA_ARGS__) current 75 drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h __func__, __LINE__, current->pid, ##__VA_ARGS__) current 53 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h __func__, __LINE__, current->pid, \ current 59 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h __func__, __LINE__, current->pid, \ current 70 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h __func__, __LINE__, current->pid, \ current 76 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h __func__, __LINE__, current->pid, \ current 81 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h __func__, __LINE__, current->pid, \ current 86 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h __func__, __LINE__, current->pid, \ current 92 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h __func__, __LINE__, current->pid, \ current 101 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h __func__, __LINE__, current->pid, \ current 214 drivers/net/ethernet/mellanox/mlx5/core/qp.c qp->pid = current->pid; current 635 drivers/net/ethernet/sfc/falcon/falcon.c if (signal_pending(current)) { current 728 drivers/net/ethernet/sfc/falcon/falcon.c if (signal_pending(current)) { current 758 drivers/net/ethernet/sfc/falcon/falcon.c if (signal_pending(current)) current 844 drivers/net/ethernet/sfc/falcon/falcon.c if (signal_pending(current)) current 426 drivers/net/ppp/ppp_generic.c DECLARE_WAITQUEUE(wait, current); current 465 drivers/net/ppp/ppp_generic.c if (signal_pending(current)) current 597 drivers/net/ppp/ppp_generic.c err = ppp_unattached_ioctl(current->nsproxy->net_ns, current 609 drivers/net/ppp/ppp_generic.c current->comm, current->pid); current 2596 drivers/net/ppp/ppp_generic.c return ppp_register_net_channel(current->nsproxy->net_ns, chan); current 825 drivers/net/slip/slip.c sl->pid = current->pid; current 1248 drivers/net/slip/slip.c if (sl->tty != current->signal->tty && current 1249 drivers/net/slip/slip.c sl->pid != current->pid) { current 503 drivers/net/tap.c struct net *net = current->nsproxy->net_ns; current 853 drivers/net/tap.c if (signal_pending(current)) { current 1661 drivers/net/tun.c struct page_frag *alloc_frag = ¤t->task_frag; current 2173 drivers/net/tun.c DECLARE_WAITQUEUE(wait, current); current 2192 drivers/net/tun.c if (signal_pending(current)) { current 3409 drivers/net/tun.c __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); current 3420 drivers/net/tun.c struct net *net = current->nsproxy->net_ns; current 1544 drivers/net/usb/hso.c DECLARE_WAITQUEUE(wait, current); current 1572 drivers/net/usb/hso.c if (signal_pending(current)) { current 2668 drivers/net/usb/lan78xx.c DECLARE_WAITQUEUE(wait, current); current 780 drivers/net/usb/usbnet.c DECLARE_WAITQUEUE(wait, current); current 772 drivers/net/wan/cosa.c DECLARE_WAITQUEUE(wait, current); current 801 drivers/net/wan/cosa.c if (signal_pending(current) && chan->rx_status == 0) { current 847 drivers/net/wan/cosa.c DECLARE_WAITQUEUE(wait, current); current 885 drivers/net/wan/cosa.c if (signal_pending(current) && chan->tx_status == 0) { current 891 drivers/net/wimax/i2400m/tx.c current->pid, (void *) tx_msg - i2400m->tx_buf, current 343 drivers/net/wimax/i2400m/usb-rx.c i2400mu->rx_kthread = current; current 191 drivers/net/wimax/i2400m/usb-tx.c i2400mu->tx_kthread = current; current 500 drivers/net/wireless/ath/ath6kl/cfg80211.c if (signal_pending(current)) { current 1467 drivers/net/wireless/ath/ath6kl/cfg80211.c if (signal_pending(current)) { current 1728 drivers/net/wireless/broadcom/b43legacy/main.c if (signal_pending(current)) { current 825 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c current->pid, name, ifp->mac_addr); current 1694 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c DECLARE_WAITQUEUE(wait, current); current 1701 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c while (!(*condition) && (!signal_pending(current) && timeout)) current 1704 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (signal_pending(current)) current 1753 drivers/net/wireless/cisco/airo.c ai->list_bss_task = current; current 3080 drivers/net/wireless/cisco/airo.c init_waitqueue_entry(&wait, current); current 3097 drivers/net/wireless/cisco/airo.c !freezing(current)) { current 3110 drivers/net/wireless/cisco/airo.c !freezing(current)) { current 3116 drivers/net/wireless/cisco/airo.c current->state = TASK_RUNNING; current 318 drivers/net/wireless/intersil/hostap/hostap_hw.c DECLARE_WAITQUEUE(wait, current); current 335 drivers/net/wireless/intersil/hostap/hostap_hw.c if (signal_pending(current)) current 367 drivers/net/wireless/intersil/hostap/hostap_hw.c if (signal_pending(current)) current 384 drivers/net/wireless/intersil/hostap/hostap_hw.c if (!err && signal_pending(current)) current 2549 drivers/net/wireless/intersil/hostap/hostap_ioctl.c init_waitqueue_entry(&__wait, current); current 2553 drivers/net/wireless/intersil/hostap/hostap_ioctl.c if (signal_pending(current)) current 2907 drivers/net/wireless/intersil/hostap/hostap_ioctl.c dev->name, task_pid_nr(current), current->comm); current 441 drivers/net/wireless/marvell/libertas/main.c init_waitqueue_entry(&wait, current); current 1554 drivers/net/wireless/marvell/mwl8k.c if (priv->hw_restart_owner == current) current 2152 drivers/net/wireless/marvell/mwl8k.c if (priv->fw_mutex_owner != current) { current 2168 drivers/net/wireless/marvell/mwl8k.c priv->fw_mutex_owner = current; current 4913 drivers/net/wireless/marvell/mwl8k.c priv->hw_restart_owner = current; current 58 drivers/net/wireless/realtek/rtlwifi/debug.c current->comm, current->pid, titlestring); current 156 drivers/net/wireless/rsi/rsi_91x_sdio.c dev->sdio_irq_task = current; current 420 drivers/net/wireless/rsi/rsi_91x_sdio.c if (likely(dev->sdio_irq_task != current)) current 428 drivers/net/wireless/rsi/rsi_91x_sdio.c if (likely(dev->sdio_irq_task != current)) current 453 drivers/net/wireless/rsi/rsi_91x_sdio.c if (likely(dev->sdio_irq_task != current)) current 461 drivers/net/wireless/rsi/rsi_91x_sdio.c if (likely(dev->sdio_irq_task != current)) current 507 drivers/net/wireless/rsi/rsi_91x_sdio.c if (likely(dev->sdio_irq_task != current)) current 512 drivers/net/wireless/rsi/rsi_91x_sdio.c if (likely(dev->sdio_irq_task != current)) current 551 drivers/net/wireless/rsi/rsi_91x_sdio.c if (likely(dev->sdio_irq_task != current)) current 556 drivers/net/wireless/rsi/rsi_91x_sdio.c if (likely(dev->sdio_irq_task != current)) current 198 drivers/net/wireless/st/cw1200/cw1200_spi.c DECLARE_WAITQUEUE(wait, current); current 1109 drivers/nvdimm/region_devs.c idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8)); current 2085 drivers/nvme/host/core.c if (fatal_signal_pending(current)) current 2179 drivers/nvme/host/core.c if (fatal_signal_pending(current)) current 91 drivers/oprofile/buffer_sync.c struct mm_struct *mm = current->mm; current 266 drivers/oprofile/cpu_buffer.c struct task_struct *tsk = task ? task : current; current 367 drivers/oprofile/cpu_buffer.c if (op_add_code(cpu_buf, 0, is_kernel, current)) current 172 drivers/oprofile/event_buffer.c if (signal_pending(current)) current 119 drivers/parport/daisy.c if (signal_pending(current)) current 126 drivers/parport/ieee1284.c if (signal_pending (current)) current 195 drivers/parport/ieee1284.c if (signal_pending (current)) current 108 drivers/parport/ieee1284_ops.c if (signal_pending (current)) current 115 drivers/parport/ieee1284_ops.c if (signal_pending (current)) current 444 drivers/parport/ieee1284_ops.c if (signal_pending (current)) { current 548 drivers/parport/ieee1284_ops.c if (signal_pending (current)) current 680 drivers/parport/ieee1284_ops.c if (signal_pending (current)) { current 1245 drivers/parport/parport_ip32.c if (signal_pending(current)) { current 1504 drivers/parport/parport_ip32.c if (signal_pending(current)) current 1515 drivers/parport/parport_ip32.c if (signal_pending(current)) current 170 drivers/parport/parport_pc.c if (signal_pending(current)) current 182 drivers/parport/parport_pc.c if (signal_pending(current)) current 539 drivers/parport/parport_pc.c if (signal_pending(current)) current 1282 drivers/parport/share.c if (signal_pending(current)) current 208 drivers/pci/access.c DECLARE_WAITQUEUE(wait, current); current 472 drivers/pci/hotplug/cpci_hotplug_core.c if (kthread_should_stop() || signal_pending(current)) current 706 drivers/pci/hotplug/cpqphp.h DECLARE_WAITQUEUE(wait, current); current 714 drivers/pci/hotplug/cpqphp.h if (signal_pending(current)) current 38 drivers/pci/quirks.c pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current)); current 160 drivers/pci/vpd.c if (fatal_signal_pending(current)) current 597 drivers/pcmcia/cs.c skt->thread = current; current 464 drivers/platform/x86/intel_speed_select_if/isst_if_common.c if (signal_pending(current)) { current 417 drivers/platform/x86/thinkpad_acpi.c what, task_tgid_vnr(current)); current 425 drivers/platform/x86/thinkpad_acpi.c what, task_tgid_vnr(current), ## arg); \ current 260 drivers/powercap/idle_inject.c sched_setscheduler(current, SCHED_FIFO, ¶m); current 714 drivers/rapidio/devices/rio_mport_cdev.c current->comm, task_pid_nr(current), current 745 drivers/rapidio/devices/rio_mport_cdev.c rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current), current 771 drivers/rapidio/devices/rio_mport_cdev.c current->comm, task_pid_nr(current), current 779 drivers/rapidio/devices/rio_mport_cdev.c current->comm, task_pid_nr(current), current 787 drivers/rapidio/devices/rio_mport_cdev.c current->comm, task_pid_nr(current), current 1049 drivers/rapidio/devices/rio_mport_cdev.c current->comm, task_pid_nr(current), current 1058 drivers/rapidio/devices/rio_mport_cdev.c current->comm, task_pid_nr(current), current 1067 drivers/rapidio/devices/rio_mport_cdev.c current->comm, task_pid_nr(current), current 1952 drivers/rapidio/devices/rio_mport_cdev.c filp, current->comm, task_pid_nr(current)); current 1964 drivers/rapidio/devices/rio_mport_cdev.c filp, current->comm, task_pid_nr(current)); current 1985 drivers/rapidio/devices/rio_mport_cdev.c current->comm, task_pid_nr(current), wret); current 1990 drivers/rapidio/devices/rio_mport_cdev.c filp, current->comm, task_pid_nr(current)); current 315 drivers/rapidio/devices/tsi721_dma.c bdma_chan->id, task_pid_nr(current)); current 321 drivers/rapidio/devices/tsi721_dma.c task_pid_nr(current)); current 792 drivers/rapidio/rio_cm.c riocm_error("%s(%d) ch_%d not found", current->comm, current 793 drivers/rapidio/rio_cm.c task_pid_nr(current), ch_id); current 1442 drivers/rapidio/rio_cm.c ch->id, current->comm, task_pid_nr(current)); current 1458 drivers/rapidio/rio_cm.c current->comm, task_pid_nr(current), ch->id); current 1463 drivers/rapidio/rio_cm.c current->comm, task_pid_nr(current), ch->id); current 1483 drivers/rapidio/rio_cm.c current->comm, task_pid_nr(current), filp); current 1501 drivers/rapidio/rio_cm.c current->comm, task_pid_nr(current), filp); current 1508 drivers/rapidio/rio_cm.c ch->id, current->comm, current 1509 drivers/rapidio/rio_cm.c task_pid_nr(current)); current 1666 drivers/rapidio/rio_cm.c ch_num, current->comm, task_pid_nr(current)); current 1673 drivers/rapidio/rio_cm.c ch_num, current->comm, task_pid_nr(current)); current 1692 drivers/rapidio/rio_cm.c ch_num, current->comm, task_pid_nr(current)); current 1756 drivers/rapidio/rio_cm.c param.ch_num, current->comm, task_pid_nr(current)); current 1767 drivers/rapidio/rio_cm.c ch->id, current->comm, task_pid_nr(current)); current 158 drivers/regulator/core.c if (rdev->mutex_owner == current) current 174 drivers/regulator/core.c rdev->mutex_owner = current; current 145 drivers/rtc/dev.c DECLARE_WAITQUEUE(wait, current); current 169 drivers/rtc/dev.c if (signal_pending(current)) { current 635 drivers/s390/char/con3215.c DECLARE_WAITQUEUE(wait, current); current 465 drivers/s390/char/fs3270.c fp->fs_pid = get_pid(task_pid(current)); current 492 drivers/s390/char/keyboard.c perm = current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG); current 1345 drivers/s390/cio/device.c if (signal_pending(current)) current 482 drivers/s390/crypto/ap_bus.c DECLARE_WAITQUEUE(wait, current); current 484 drivers/s390/crypto/ap_bus.c set_user_nice(current, MAX_NICE); current 471 drivers/s390/crypto/zcrypt_msgtype50.c ap_msg.psmid = (((unsigned long long) current->pid) << 32) + current 517 drivers/s390/crypto/zcrypt_msgtype50.c ap_msg.psmid = (((unsigned long long) current->pid) << 32) + current 1027 drivers/s390/crypto/zcrypt_msgtype6.c ap_msg.psmid = (((unsigned long long) current->pid) << 32) + current 1071 drivers/s390/crypto/zcrypt_msgtype6.c ap_msg.psmid = (((unsigned long long) current->pid) << 32) + current 1114 drivers/s390/crypto/zcrypt_msgtype6.c ap_msg->psmid = (((unsigned long long) current->pid) << 32) + current 1169 drivers/s390/crypto/zcrypt_msgtype6.c ap_msg->psmid = (((unsigned long long) current->pid) << 32) + current 1263 drivers/s390/crypto/zcrypt_msgtype6.c ap_msg->psmid = (((unsigned long long) current->pid) << 32) + current 125 drivers/sbus/char/bbc_i2c.c DECLARE_WAITQUEUE(wait, current); current 1002 drivers/sbus/char/envctrl.c printk(KERN_INFO PFX "%s starting...\n", current->comm); current 1017 drivers/sbus/char/envctrl.c current->comm, whichcpu, current 1024 drivers/sbus/char/envctrl.c printk(KERN_INFO PFX "%s exiting...\n", current->comm); current 379 drivers/sbus/char/oradax.c if (ctx->owner != current) { current 552 drivers/sbus/char/oradax.c if (ctx->client != current) current 585 drivers/sbus/char/oradax.c if (ctx->owner != current) current 611 drivers/sbus/char/oradax.c ctx->client = current; current 628 drivers/sbus/char/oradax.c ctx->client = current; current 673 drivers/sbus/char/oradax.c ctx->owner = current; current 858 drivers/sbus/char/oradax.c ctx->client = current; current 862 drivers/sbus/char/oradax.c if (ctx->owner != current) { current 1492 drivers/scsi/aacraid/commsup.c if (aac->thread && aac->thread->pid != current->pid) { current 2442 drivers/scsi/aacraid/commsup.c DECLARE_WAITQUEUE(wait, current); current 489 drivers/scsi/bnx2fc/bnx2fc_fcoe.c set_user_nice(current, MIN_NICE); current 652 drivers/scsi/bnx2fc/bnx2fc_fcoe.c set_user_nice(current, MIN_NICE); current 983 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (signal_pending(current)) current 984 drivers/scsi/bnx2fc/bnx2fc_fcoe.c flush_signals(current); current 1950 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (signal_pending(current)) current 1951 drivers/scsi/bnx2fc/bnx2fc_fcoe.c flush_signals(current); current 75 drivers/scsi/bnx2fc/bnx2fc_tgt.c if (signal_pending(current)) current 76 drivers/scsi/bnx2fc/bnx2fc_tgt.c flush_signals(current); current 286 drivers/scsi/bnx2fc/bnx2fc_tgt.c if (signal_pending(current)) current 287 drivers/scsi/bnx2fc/bnx2fc_tgt.c flush_signals(current); current 1857 drivers/scsi/bnx2i/bnx2i_hwi.c set_user_nice(current, MIN_NICE); current 1621 drivers/scsi/bnx2i/bnx2i_iscsi.c if (signal_pending(current)) current 1622 drivers/scsi/bnx2i/bnx2i_iscsi.c flush_signals(current); current 1744 drivers/scsi/bnx2i/bnx2i_iscsi.c if (signal_pending(current)) current 1745 drivers/scsi/bnx2i/bnx2i_iscsi.c flush_signals(current); current 1856 drivers/scsi/bnx2i/bnx2i_iscsi.c if (signal_pending(current)) current 1857 drivers/scsi/bnx2i/bnx2i_iscsi.c flush_signals(current); current 2095 drivers/scsi/bnx2i/bnx2i_iscsi.c if (signal_pending(current)) current 2096 drivers/scsi/bnx2i/bnx2i_iscsi.c flush_signals(current); current 360 drivers/scsi/cxlflash/ocxl_hw.c pid = current->mm->context.id; current 361 drivers/scsi/cxlflash/ocxl_hw.c mm = current->mm; current 1047 drivers/scsi/cxlflash/ocxl_hw.c if (signal_pending(current)) { current 165 drivers/scsi/cxlflash/superpipe.c pid_t pid = task_tgid_nr(current), ctxpid = 0; current 173 drivers/scsi/cxlflash/superpipe.c pid = task_ppid_nr(current); current 836 drivers/scsi/cxlflash/superpipe.c ctxi->pid = task_tgid_nr(current); /* tgid = pid */ current 1157 drivers/scsi/dpt_i2o.c DECLARE_WAITQUEUE(wait, current); current 4475 drivers/scsi/ibmvscsi/ibmvfc.c set_user_nice(current, MIN_NICE); current 2182 drivers/scsi/ibmvscsi/ibmvscsi.c set_user_nice(current, MIN_NICE); current 601 drivers/scsi/libfc/fc_exch.c while (ep->resp_active && ep->resp_task != current) { current 734 drivers/scsi/libfc/fc_exch.c if (ep->resp_task != current) current 735 drivers/scsi/libfc/fc_exch.c ep->resp_task = !ep->resp_task ? current : NULL; current 1837 drivers/scsi/libiscsi.c if (signal_pending(current)) current 1838 drivers/scsi/libiscsi.c flush_signals(current); current 2407 drivers/scsi/libiscsi.c if (signal_pending(current)) current 2408 drivers/scsi/libiscsi.c flush_signals(current); current 2671 drivers/scsi/libiscsi.c if (signal_pending(current)) current 2672 drivers/scsi/libiscsi.c flush_signals(current); current 452 drivers/scsi/libsas/sas_discover.c task_pid_nr(current)); current 492 drivers/scsi/libsas/sas_discover.c task_pid_nr(current), error); current 507 drivers/scsi/libsas/sas_discover.c port->id, task_pid_nr(current)); current 514 drivers/scsi/libsas/sas_discover.c task_pid_nr(current)); current 520 drivers/scsi/libsas/sas_discover.c port->id, task_pid_nr(current), res); current 500 drivers/scsi/libsas/sas_scsi_host.c if (current != host->ehandler) current 522 drivers/scsi/libsas/sas_scsi_host.c if (current != host->ehandler) current 2664 drivers/scsi/lpfc/lpfc_bsg.c evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, current 3211 drivers/scsi/lpfc/lpfc_bsg.c evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid, current 748 drivers/scsi/lpfc/lpfc_hbadisc.c set_user_nice(current, MIN_NICE); current 749 drivers/scsi/lpfc/lpfc_hbadisc.c current->flags |= PF_NOFREEZE; current 147 drivers/scsi/lpfc/lpfc_vport.c if (signal_pending(current)) { current 804 drivers/scsi/qedi/qedi_main.c set_user_nice(current, -20); current 1864 drivers/scsi/qedi/qedi_main.c set_user_nice(current, -20); current 513 drivers/scsi/qla2xxx/qla_mbx.c } else if (current == ha->dpc_thread) { current 6144 drivers/scsi/qla2xxx/qla_os.c set_user_nice(current, MIN_NICE); current 1237 drivers/scsi/scsi_error.c current->comm)); current 1251 drivers/scsi/scsi_error.c current->comm)); current 1345 drivers/scsi/scsi_error.c current->comm)); current 1414 drivers/scsi/scsi_error.c current->comm)); current 1431 drivers/scsi/scsi_error.c current->comm)); current 1447 drivers/scsi/scsi_error.c current->comm)); current 1480 drivers/scsi/scsi_error.c current->comm)); current 1495 drivers/scsi/scsi_error.c "%s: Sending BDR\n", current->comm)); current 1512 drivers/scsi/scsi_error.c "%s: BDR failed\n", current->comm)); current 1549 drivers/scsi/scsi_error.c current->comm)); current 1559 drivers/scsi/scsi_error.c current->comm, id)); current 1566 drivers/scsi/scsi_error.c current->comm, id)); current 1612 drivers/scsi/scsi_error.c current->comm)); current 1633 drivers/scsi/scsi_error.c current->comm, channel)); current 1650 drivers/scsi/scsi_error.c current->comm, channel)); current 1677 drivers/scsi/scsi_error.c current->comm)); current 1690 drivers/scsi/scsi_error.c current->comm)); current 2097 drivers/scsi/scsi_error.c current->comm)); current 2110 drivers/scsi/scsi_error.c current->comm)); current 217 drivers/scsi/scsi_ioctl.c "ioctl, please convert it to SG_IO\n", current->comm); current 2556 drivers/scsi/scsi_lib.c WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current); current 2558 drivers/scsi/scsi_lib.c if (sdev->quiesced_by == current) current 2576 drivers/scsi/scsi_lib.c sdev->quiesced_by = current; current 224 drivers/scsi/sg.c caller, task_tgid_vnr(current), current->comm); current 229 drivers/scsi/sg.c caller, task_tgid_vnr(current), current->comm); current 709 drivers/scsi/sg.c current->comm); current 461 drivers/scsi/st.c if (signal_pending(current)) current 605 drivers/scsi/st.c if (signal_pending(current)) current 659 drivers/scsi/xen-scsifront.c if (info && current == info->curr) { current 677 drivers/scsi/xen-scsifront.c if (info && current == info->curr) { current 985 drivers/scsi/xen-scsifront.c info->curr = current; current 365 drivers/staging/android/ashmem.c return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); current 540 drivers/staging/android/ashmem.c if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) current 17 drivers/staging/android/ion/ion_page_pool.c if (fatal_signal_pending(current)) current 443 drivers/staging/android/vsoc.c current->timer_slack_ns); current 476 drivers/staging/android/vsoc.c if (signal_pending(current)) { current 2450 drivers/staging/comedi/comedi_fops.c DECLARE_WAITQUEUE(wait, current); current 2509 drivers/staging/comedi/comedi_fops.c if (signal_pending(current)) { current 2586 drivers/staging/comedi/comedi_fops.c DECLARE_WAITQUEUE(wait, current); current 2643 drivers/staging/comedi/comedi_fops.c if (signal_pending(current)) { current 3677 drivers/staging/exfat/exfat_super.c opts->fs_fmask = opts->fs_dmask = current->fs->umask; current 139 drivers/staging/gasket/gasket_core.c (info->ownership.owner == current->tgid)); current 1176 drivers/staging/gasket/gasket_core.c struct pid_namespace *pid_ns = task_active_pid_ns(current); current 1182 drivers/staging/gasket/gasket_core.c get_task_comm(task_name, current); current 1189 drivers/staging/gasket/gasket_core.c current->tgid, task_name, filp->f_mode, current 1205 drivers/staging/gasket/gasket_core.c if (ownership->is_owned && ownership->owner != current->tgid && current 1209 drivers/staging/gasket/gasket_core.c current->tgid, ownership->owner); current 1225 drivers/staging/gasket/gasket_core.c ownership->owner = current->tgid; current 1255 drivers/staging/gasket/gasket_core.c struct pid_namespace *pid_ns = task_active_pid_ns(current); current 1261 drivers/staging/gasket/gasket_core.c get_task_comm(task_name, current); current 1267 drivers/staging/gasket/gasket_core.c current->tgid, task_name, file->f_mode, current 665 drivers/staging/greybus/uart.c DECLARE_WAITQUEUE(wait, current); current 694 drivers/staging/greybus/uart.c } else if (signal_pending(current)) { current 79 drivers/staging/kpc2000/kpc_dma/fileops.c down_read(¤t->mm->mmap_sem); /* get memory map semaphore */ current 81 drivers/staging/kpc2000/kpc_dma/fileops.c up_read(¤t->mm->mmap_sem); /* release the semaphore */ current 209 drivers/staging/rtl8188eu/core/rtw_cmd.c if (signal_pending(current)) current 210 drivers/staging/rtl8188eu/core/rtw_cmd.c flush_signals(current); current 206 drivers/staging/rtl8188eu/os_dep/usb_intf.c pr_debug("==> %s (%s:%d)\n", __func__, current->comm, current->pid); current 271 drivers/staging/rtl8188eu/os_dep/usb_intf.c pr_debug("==> %s (%s:%d)\n", __func__, current->comm, current->pid); current 56 drivers/staging/rtl8712/osdep_service.h if (signal_pending(current)) current 57 drivers/staging/rtl8712/osdep_service.h flush_signals(current); current 492 drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c if (signal_pending(current)) { current 493 drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c flush_signals(current); current 120 drivers/staging/rtl8723bs/include/osdep_service.h if (signal_pending (current)) current 122 drivers/staging/rtl8723bs/include/osdep_service.h flush_signals(current); current 1437 drivers/staging/rtl8723bs/os_dep/os_intfs.c DBG_871X("==> %s (%s:%d)\n", __func__, current->comm, current->pid); current 1793 drivers/staging/rtl8723bs/os_dep/os_intfs.c DBG_871X("==> %s (%s:%d)\n", __func__, current->comm, current->pid); current 66 drivers/staging/rtl8723bs/os_dep/sdio_intf.c rtw_sdio_set_irq_thd(psdpriv, current); current 596 drivers/staging/rtl8723bs/os_dep/sdio_intf.c DBG_871X("==> %s (%s:%d)\n", __func__, current->comm, current->pid); current 17 drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c if (sdio_data->sys_sdio_irq_thd && sdio_data->sys_sdio_irq_thd == current) current 227 drivers/staging/speakup/speakup_soft.c if (signal_pending(current)) { current 1442 drivers/staging/unisys/visornic/visornic_main.c for_each_netdev_rcu(current->nsproxy->net_ns, dev) { current 368 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c status = vchiq_open_service_internal(service, current->pid); current 451 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (waiter->pid == current->pid) { current 487 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || current 501 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter->pid = current->pid; current 507 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter, current->pid); current 1074 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (waiter->pid == current->pid) { current 1083 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c current->pid); current 1089 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c current->pid); current 1099 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || current 1112 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter->pid = current->pid; current 1118 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter, current->pid); current 1937 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c instance->pid = current->tgid; current 2328 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c flush_signals(current); current 2856 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c if (current == service->state->slot_handler_thread) { current 2914 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c (current == service->state->slot_handler_thread)) { current 49 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c flush_signals(current); current 62 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c flush_signals(current); current 76 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c flush_signals(current); current 331 drivers/target/iscsi/cxgbit/cxgbit_target.c signal_pending(current))) { current 1599 drivers/target/iscsi/cxgbit/cxgbit_target.c if (signal_pending(current)) current 1634 drivers/target/iscsi/cxgbit/cxgbit_target.c iscsit_thread_check_cpumask(conn, current, 0); current 3810 drivers/target/iscsi/iscsi_target.c iscsit_thread_check_cpumask(conn, current, 1); current 3815 drivers/target/iscsi/iscsi_target.c if (signal_pending(current)) current 3955 drivers/target/iscsi/iscsi_target.c iscsit_thread_check_cpumask(conn, current, 0); current 4045 drivers/target/iscsi/iscsi_target.c if (!signal_pending(current)) current 4127 drivers/target/iscsi/iscsi_target.c if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) { current 4133 drivers/target/iscsi/iscsi_target.c } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) { current 1254 drivers/target/iscsi/iscsi_target_login.c flush_signals(current); current 578 drivers/target/iscsi/iscsi_target_nego.c conn, current->comm, current->pid); current 608 drivers/target/iscsi/iscsi_target_nego.c conn->login_kworker = current; current 614 drivers/target/iscsi/iscsi_target_nego.c pr_debug("Starting login timer for %s/%d\n", current->comm, current->pid); current 619 drivers/target/iscsi/iscsi_target_nego.c flush_signals(current); current 626 drivers/target/iscsi/iscsi_target_nego.c conn, current->comm, current->pid); current 156 drivers/target/iscsi/iscsi_target_util.c if (signal_pending_state(state, current)) current 554 drivers/tee/optee/call.c struct mm_struct *mm = current->mm; current 1262 drivers/tty/amiserial.c if (signal_pending(current)) { current 1424 drivers/tty/amiserial.c if (signal_pending(current)) current 1572 drivers/tty/cyclades.c current->pid, info->port.count); current 1508 drivers/tty/mxser.c current->comm, cmd); current 2018 drivers/tty/mxser.c if (signal_pending(current)) current 2043 drivers/tty/n_gsm.c if (signal_pending(current)) current 566 drivers/tty/n_hdlc.c DECLARE_WAITQUEUE(wait, current); current 623 drivers/tty/n_hdlc.c if (signal_pending(current)) { current 650 drivers/tty/n_hdlc.c DECLARE_WAITQUEUE(wait, current); current 697 drivers/tty/n_hdlc.c if (signal_pending(current)) { current 953 drivers/tty/n_r3964.c tty, current->pid, tty->disc_data); current 1083 drivers/tty/n_r3964.c pClient = findClient(pInfo, task_pid(current)); current 1172 drivers/tty/n_r3964.c pClient = findClient(pInfo, task_pid(current)); current 1200 drivers/tty/n_r3964.c return enable_signals(pInfo, task_pid(current), arg); current 1213 drivers/tty/n_r3964.c return read_telegram(pInfo, task_pid(current), current 1252 drivers/tty/n_r3964.c pClient = findClient(pInfo, task_pid(current)); current 2220 drivers/tty/n_tty.c if (signal_pending(current)) { current 2323 drivers/tty/n_tty.c if (signal_pending(current)) { current 1454 drivers/tty/rocket.c if (signal_pending(current)) current 177 drivers/tty/serial/max3100.c if (!s->force_end_work && !freezing(current) && !s->suspending) current 315 drivers/tty/serial/max3100.c !freezing(current) && current 992 drivers/tty/serial/serial_core.c current->comm, current 1199 drivers/tty/serial/serial_core.c DECLARE_WAITQUEUE(wait, current); current 1233 drivers/tty/serial/serial_core.c if (signal_pending(current)) { current 1642 drivers/tty/serial/serial_core.c if (signal_pending(current)) current 2664 drivers/tty/synclink.c DECLARE_WAITQUEUE(wait, current); current 2712 drivers/tty/synclink.c if (signal_pending(current)) { current 2782 drivers/tty/synclink.c DECLARE_WAITQUEUE(wait, current); current 2793 drivers/tty/synclink.c if (signal_pending(current)) { current 3149 drivers/tty/synclink.c if (signal_pending(current)) current 3158 drivers/tty/synclink.c if (signal_pending(current)) current 3250 drivers/tty/synclink.c DECLARE_WAITQUEUE(wait, current); current 3305 drivers/tty/synclink.c if (signal_pending(current)) { current 918 drivers/tty/synclink_gt.c if (signal_pending(current)) current 2728 drivers/tty/synclink_gt.c DECLARE_WAITQUEUE(wait, current); current 2769 drivers/tty/synclink_gt.c if (signal_pending(current)) { current 2993 drivers/tty/synclink_gt.c init_waitqueue_entry(&w->wait, current); current 3078 drivers/tty/synclink_gt.c if (signal_pending(current)) current 3101 drivers/tty/synclink_gt.c DECLARE_WAITQUEUE(wait, current); current 3112 drivers/tty/synclink_gt.c if (signal_pending(current)) { current 3232 drivers/tty/synclink_gt.c DECLARE_WAITQUEUE(wait, current); current 3281 drivers/tty/synclink_gt.c if (signal_pending(current)) { current 1088 drivers/tty/synclinkmp.c if (signal_pending(current)) current 1100 drivers/tty/synclinkmp.c if (signal_pending(current)) current 3006 drivers/tty/synclinkmp.c DECLARE_WAITQUEUE(wait, current); current 3056 drivers/tty/synclinkmp.c if (signal_pending(current)) { current 3126 drivers/tty/synclinkmp.c DECLARE_WAITQUEUE(wait, current); current 3137 drivers/tty/synclinkmp.c if (signal_pending(current)) { current 3255 drivers/tty/synclinkmp.c DECLARE_WAITQUEUE(wait, current); current 3311 drivers/tty/synclinkmp.c if (signal_pending(current)) { current 26 drivers/tty/tty_audit.c buf = current->signal->tty_audit_buf; current 64 drivers/tty/tty_audit.c pid_t pid = task_pid_nr(current); current 65 drivers/tty/tty_audit.c uid_t uid = from_kuid(&init_user_ns, task_uid(current)); current 66 drivers/tty/tty_audit.c uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current)); current 67 drivers/tty/tty_audit.c unsigned int sessionid = audit_get_sessionid(current); current 71 drivers/tty/tty_audit.c char name[sizeof(current->comm)]; current 76 drivers/tty/tty_audit.c get_task_comm(name, current); current 115 drivers/tty/tty_audit.c buf = xchg(¤t->signal->tty_audit_buf, ERR_PTR(-ESRCH)); current 130 drivers/tty/tty_audit.c sig->audit_tty = current->signal->audit_tty; current 157 drivers/tty/tty_audit.c if (~current->signal->audit_tty & AUDIT_TTY_ENABLE) current 191 drivers/tty/tty_audit.c if (cmpxchg(¤t->signal->tty_audit_buf, NULL, buf) != NULL) current 208 drivers/tty/tty_audit.c audit_tty = READ_ONCE(current->signal->audit_tty); current 970 drivers/tty/tty_io.c if (signal_pending(current)) current 2037 drivers/tty/tty_io.c if (retval != -EAGAIN || signal_pending(current)) current 2062 drivers/tty/tty_io.c if (signal_pending(current)) current 2140 drivers/tty/tty_io.c pid = task_pid(current); current 2188 drivers/tty/tty_io.c if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) current 2392 drivers/tty/tty_io.c if (!signal_pending(current)) current 2397 drivers/tty/tty_io.c if (signal_pending(current)) current 2500 drivers/tty/tty_io.c __func__, get_task_comm(comm, current), flags); current 2562 drivers/tty/tty_io.c if (signal_pending(current)) current 2722 drivers/tty/tty_io.c __func__, get_task_comm(comm, current), flags); current 410 drivers/tty/tty_ioctl.c if (signal_pending(current)) current 477 drivers/tty/tty_ioctl.c if (signal_pending(current)) current 17 drivers/tty/tty_jobctrl.c return (sigismember(¤t->blocked, sig) || current 18 drivers/tty/tty_jobctrl.c current->sighand->action[sig-1].sa.sa_handler == SIG_IGN); current 37 drivers/tty/tty_jobctrl.c if (current->signal->tty != tty) current 41 drivers/tty/tty_jobctrl.c pgrp = task_pgrp(current); current 105 drivers/tty/tty_jobctrl.c tty->pgrp = get_pid(task_pgrp(current)); current 107 drivers/tty/tty_jobctrl.c tty->session = get_pid(task_session(current)); current 108 drivers/tty/tty_jobctrl.c if (current->signal->tty) { current 110 drivers/tty/tty_jobctrl.c current->signal->tty->name); current 111 drivers/tty/tty_jobctrl.c tty_kref_put(current->signal->tty); current 113 drivers/tty/tty_jobctrl.c put_pid(current->signal->tty_old_pgrp); current 114 drivers/tty/tty_jobctrl.c current->signal->tty = tty_kref_get(tty); current 115 drivers/tty/tty_jobctrl.c current->signal->tty_old_pgrp = NULL; current 120 drivers/tty/tty_jobctrl.c spin_lock_irq(¤t->sighand->siglock); current 122 drivers/tty/tty_jobctrl.c spin_unlock_irq(¤t->sighand->siglock); current 131 drivers/tty/tty_jobctrl.c spin_lock_irq(¤t->sighand->siglock); current 132 drivers/tty/tty_jobctrl.c if (current->signal->leader && current 133 drivers/tty/tty_jobctrl.c !current->signal->tty && current 152 drivers/tty/tty_jobctrl.c spin_unlock_irq(¤t->sighand->siglock); current 161 drivers/tty/tty_jobctrl.c spin_lock_irqsave(¤t->sighand->siglock, flags); current 162 drivers/tty/tty_jobctrl.c tty = tty_kref_get(current->signal->tty); current 163 drivers/tty/tty_jobctrl.c spin_unlock_irqrestore(¤t->sighand->siglock, flags); current 261 drivers/tty/tty_jobctrl.c if (!current->signal->leader) current 281 drivers/tty/tty_jobctrl.c spin_lock_irq(¤t->sighand->siglock); current 282 drivers/tty/tty_jobctrl.c old_pgrp = current->signal->tty_old_pgrp; current 283 drivers/tty/tty_jobctrl.c current->signal->tty_old_pgrp = NULL; current 284 drivers/tty/tty_jobctrl.c spin_unlock_irq(¤t->sighand->siglock); current 293 drivers/tty/tty_jobctrl.c spin_lock_irq(¤t->sighand->siglock); current 294 drivers/tty/tty_jobctrl.c put_pid(current->signal->tty_old_pgrp); current 295 drivers/tty/tty_jobctrl.c current->signal->tty_old_pgrp = NULL; current 297 drivers/tty/tty_jobctrl.c tty = tty_kref_get(current->signal->tty); current 309 drivers/tty/tty_jobctrl.c spin_unlock_irq(¤t->sighand->siglock); current 312 drivers/tty/tty_jobctrl.c session_clear_tty(task_session(current)); current 325 drivers/tty/tty_jobctrl.c struct task_struct *tsk = current; current 350 drivers/tty/tty_jobctrl.c if (current->signal->leader && (task_session(current) == tty->session)) current 357 drivers/tty/tty_jobctrl.c if (!current->signal->leader || current->signal->tty) { current 451 drivers/tty/tty_jobctrl.c if (tty == real_tty && current->signal->tty != real_tty) current 480 drivers/tty/tty_jobctrl.c if (!current->signal->tty || current 481 drivers/tty/tty_jobctrl.c (current->signal->tty != real_tty) || current 482 drivers/tty/tty_jobctrl.c (real_tty->session != task_session(current))) current 494 drivers/tty/tty_jobctrl.c if (session_of_pgrp(pgrp) != task_session(current)) current 523 drivers/tty/tty_jobctrl.c if (tty == real_tty && current->signal->tty != real_tty) current 541 drivers/tty/tty_jobctrl.c if (current->signal->tty != tty) current 183 drivers/tty/tty_ldsem.c waiter.task = current; current 184 drivers/tty/tty_ldsem.c get_task_struct(current); current 255 drivers/tty/tty_ldsem.c waiter.task = current; current 521 drivers/tty/tty_port.c if (signal_pending(current)) { current 375 drivers/tty/vt/selection.c DECLARE_WAITQUEUE(wait, current); current 391 drivers/tty/vt/selection.c if (signal_pending(current)) { current 3059 drivers/tty/vt/vt.c if (current->signal->tty != tty && !capable(CAP_SYS_ADMIN)) current 367 drivers/tty/vt/vt_ioctl.c if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG)) current 594 drivers/tty/vt/vt_ioctl.c vt_spawn_con.pid = get_pid(task_pid(current)); current 620 drivers/tty/vt/vt_ioctl.c vc->vt_pid = get_pid(task_pid(current)); current 729 drivers/tty/vt/vt_ioctl.c nvc->vt_pid = get_pid(task_pid(current)); current 1201 drivers/tty/vt/vt_ioctl.c if (current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG)) current 568 drivers/uio/uio.c DECLARE_WAITQUEUE(wait, current); current 605 drivers/uio/uio.c if (signal_pending(current)) { current 966 drivers/usb/class/cdc-acm.c DECLARE_WAITQUEUE(wait, current); current 996 drivers/usb/class/cdc-acm.c if (signal_pending(current)) current 871 drivers/usb/class/usblp.c DECLARE_WAITQUEUE(waita, current); current 913 drivers/usb/class/usblp.c if (signal_pending(current)) current 935 drivers/usb/class/usblp.c DECLARE_WAITQUEUE(waita, current); current 965 drivers/usb/class/usblp.c if (signal_pending(current)) current 826 drivers/usb/core/devio.c "interface %u before use\n", task_pid_nr(current), current 827 drivers/usb/core/devio.c current->comm, ifnum); current 901 drivers/usb/core/devio.c __func__, task_pid_nr(current), current 902 drivers/usb/core/devio.c current->comm, index, index ^ 0x80); current 1047 drivers/usb/core/devio.c ps->disc_pid = get_pid(task_pid(current)); current 1055 drivers/usb/core/devio.c snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current), current 1056 drivers/usb/core/devio.c current->comm); current 1181 drivers/usb/core/devio.c current->comm, ctrl.bRequestType, ctrl.bRequest, current 1276 drivers/usb/core/devio.c task_pid_nr(current), current->comm, current 1404 drivers/usb/core/devio.c number, interface->dev.driver->name, current->comm); current 1458 drivers/usb/core/devio.c current->comm, u); current 1817 drivers/usb/core/devio.c as->pid = get_pid(task_pid(current)); current 1967 drivers/usb/core/devio.c DECLARE_WAITQUEUE(wait, current); current 1977 drivers/usb/core/devio.c if (signal_pending(current)) current 2000 drivers/usb/core/devio.c if (signal_pending(current)) current 2147 drivers/usb/core/devio.c if (signal_pending(current)) current 2317 drivers/usb/core/devio.c portnum, task_pid_nr(current), current->comm); current 68 drivers/usb/core/message.c current->comm, current 1203 drivers/usb/gadget/function/f_fs.c p->mm = current->mm; current 1249 drivers/usb/gadget/function/f_fs.c p->mm = current->mm; current 698 drivers/usb/gadget/function/f_mass_storage.c if (signal_pending(current)) current 894 drivers/usb/gadget/function/f_mass_storage.c if (signal_pending(current)) current 999 drivers/usb/gadget/function/f_mass_storage.c if (signal_pending(current)) current 1003 drivers/usb/gadget/function/f_mass_storage.c if (signal_pending(current)) current 1033 drivers/usb/gadget/function/f_mass_storage.c if (signal_pending(current)) current 2057 drivers/usb/gadget/function/f_mass_storage.c if (reply == -EINTR || signal_pending(current)) current 2464 drivers/usb/gadget/function/f_mass_storage.c if (exception_in_progress(common) || signal_pending(current)) { current 536 drivers/usb/gadget/legacy/inode.c priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */ current 708 drivers/usb/image/mdc800.c if (signal_pending (current)) current 800 drivers/usb/image/mdc800.c if (signal_pending (current)) current 352 drivers/usb/misc/adutux.c DECLARE_WAITQUEUE(wait, current); current 466 drivers/usb/misc/adutux.c if (signal_pending(current)) { current 508 drivers/usb/misc/adutux.c DECLARE_WAITQUEUE(waita, current); current 544 drivers/usb/misc/adutux.c if (signal_pending(current)) { current 174 drivers/usb/misc/idmouse.c if (signal_pending(current)) { current 290 drivers/usb/misc/uss720.c if (signal_pending (current)) current 1294 drivers/usb/mon/mon_bin.c DECLARE_WAITQUEUE(waita, current); current 1310 drivers/usb/mon/mon_bin.c if (signal_pending(current)) { current 484 drivers/usb/mon/mon_text.c DECLARE_WAITQUEUE(waita, current); current 500 drivers/usb/mon/mon_text.c if (signal_pending(current)) { current 409 drivers/usb/serial/digi_acceleport.c if (interruptible && signal_pending(current)) current 472 drivers/usb/serial/digi_acceleport.c if (signal_pending(current)) current 551 drivers/usb/serial/digi_acceleport.c if (interruptible && signal_pending(current)) current 623 drivers/usb/serial/digi_acceleport.c if (signal_pending(current)) current 292 drivers/usb/serial/generic.c if (signal_pending(current)) current 436 drivers/usb/usbip/stub_dev.c if (usbip_in_eh(current)) current 328 drivers/usb/usbip/usbip_common.c pr_debug("%-10s:", current->comm); current 67 drivers/usb/usbip/usbip_event.c worker_context = current; current 162 drivers/vfio/pci/vfio_pci_nvlink2.c data->mm = current->mm; current 965 drivers/vfio/vfio.c if (signal_pending(current)) { current 971 drivers/vfio/vfio.c current->comm, task_pid_nr(current)); current 1487 drivers/vfio/vfio.c "(%s:%d)\n", current->comm, task_pid_nr(current)); current 76 drivers/vfio/vfio_iommu_spapr_tce.c if (container->mm == current->mm) current 80 drivers/vfio/vfio_iommu_spapr_tce.c BUG_ON(!current->mm); current 81 drivers/vfio/vfio_iommu_spapr_tce.c container->mm = current->mm; current 803 drivers/vfio/vfio_iommu_spapr_tce.c if (container->mm && container->mm != current->mm) current 351 drivers/vfio/vfio_iommu_type1.c if (mm == current->mm) { current 407 drivers/vfio/vfio_iommu_type1.c if (!current->mm) current 410 drivers/vfio/vfio_iommu_type1.c ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base); current 422 drivers/vfio/vfio_iommu_type1.c if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) { current 437 drivers/vfio/vfio_iommu_type1.c ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn); current 449 drivers/vfio/vfio_iommu_type1.c current->mm->locked_vm + lock_acct + 1 > limit) { current 944 drivers/vfio/vfio_iommu_type1.c if (dma->task->mm != current->mm) current 1155 drivers/vfio/vfio_iommu_type1.c get_task_struct(current->group_leader); current 1156 drivers/vfio/vfio_iommu_type1.c dma->task = current->group_leader; current 418 drivers/vhost/net.c !signal_pending(current)); current 499 drivers/vhost/vhost.c return dev->mm == current->mm ? 0 : -EPERM; current 514 drivers/vhost/vhost.c s->ret = cgroup_attach_task_all(s->owner, current); current 521 drivers/vhost/vhost.c attach.owner = current; current 548 drivers/vhost/vhost.c dev->mm = get_task_mm(current); current 549 drivers/vhost/vhost.c worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid); current 1224 drivers/vhost/vhost.c if (signal_pending(current)) { current 160 drivers/video/fbdev/cobalt_lcdfb.c if (retval < 0 && signal_pending(current)) current 209 drivers/video/fbdev/cobalt_lcdfb.c if (retval < 0 && signal_pending(current)) current 1058 drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c if (signal_pending(current)) { current 815 drivers/video/fbdev/sa1100fb.c DECLARE_WAITQUEUE(wait, current); current 38 drivers/virt/vboxguest/vboxguest_linux.c if (from_kuid(current_user_ns(), current->cred->uid) == 0) current 78 drivers/w1/w1_family.c flush_signals(current); current 203 drivers/w1/w1_int.c flush_signals(current); current 651 drivers/xen/evtchn.c u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm); current 294 drivers/xen/gntdev.c set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte)); current 608 drivers/xen/gntdev.c priv->mm = get_task_mm(current); current 739 drivers/xen/gntdev.c down_read(¤t->mm->mmap_sem); current 740 drivers/xen/gntdev.c vma = find_vma(current->mm, op.vaddr); current 753 drivers/xen/gntdev.c up_read(¤t->mm->mmap_sem); current 953 drivers/xen/grant-table.c pr_err("%s: %s eagain grant\n", func, current->comm); current 257 drivers/xen/privcmd.c struct mm_struct *mm = current->mm; current 451 drivers/xen/privcmd.c struct mm_struct *mm = current->mm; current 730 drivers/xen/privcmd.c struct mm_struct *mm = current->mm; current 812 drivers/xen/xenbus/xenbus_xs.c if (current->pid != xenwatch_pid) current 825 drivers/xen/xenbus/xenbus_xs.c if (current->pid != xenwatch_pid) current 871 drivers/xen/xenbus/xenbus_xs.c xenwatch_pid = current->pid; current 360 fs/9p/vfs_inode.c __func__, task_pid_nr(current)); current 222 fs/afs/fs_probe.c init_waitqueue_entry(&waits[i], current); current 241 fs/afs/fs_probe.c if (!still_probing || signal_pending(current)) current 264 fs/afs/fs_probe.c if (pref == -1 && signal_pending(current)) current 1449 fs/afs/internal.h printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__) current 125 fs/afs/rotate.c if (signal_pending(current)) { current 610 fs/afs/rxrpc.c DECLARE_WAITQUEUE(myself, current); current 222 fs/afs/vl_probe.c init_waitqueue_entry(&waits[i], current); current 241 fs/afs/vl_probe.c if (!still_probing || signal_pending(current)) current 264 fs/afs/vl_probe.c if (pref == -1 && signal_pending(current)) current 26 fs/afs/vl_rotate.c if (signal_pending(current)) { current 313 fs/aio.c pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, current 464 fs/aio.c struct mm_struct *mm = current->mm; current 507 fs/aio.c current->pid, i, page_count(page)); current 704 fs/aio.c struct mm_struct *mm = current->mm; current 1051 fs/aio.c struct mm_struct *mm = current->mm; current 1335 fs/aio.c kill_ioctx(current->mm, ioctx, NULL); current 1367 fs/aio.c kill_ioctx(current->mm, ioctx, NULL); current 1396 fs/aio.c ret = kill_ioctx(current->mm, ioctx, &wait); current 2098 fs/aio.c if (!ret && signal_pending(current)) current 2135 fs/aio.c interrupted = signal_pending(current); current 2171 fs/aio.c interrupted = signal_pending(current); current 2196 fs/aio.c if (!ret && signal_pending(current)) current 2237 fs/aio.c interrupted = signal_pending(current); current 2272 fs/aio.c interrupted = signal_pending(current); current 157 fs/attr.c send_sig(SIGXFSZ, current, 0); current 40 fs/autofs/autofs_i.h #define pr_fmt(fmt) KBUILD_MODNAME ":pid:%d:%s: " fmt, current->pid, __func__ current 147 fs/autofs/autofs_i.h task_pgrp(current) == sbi->oz_pgrp); current 356 fs/autofs/dev-ioctl.c new_pid = get_task_pid(current, PIDTYPE_PGID); current 312 fs/autofs/inode.c sbi->oz_pgrp = get_task_pid(current, PIDTYPE_PGID); current 490 fs/autofs/root.c current->pid, task_pgrp_nr(current), current 877 fs/autofs/root.c cmd, arg, sbi, task_pgrp_nr(current)); current 52 fs/autofs/waitq.c sigpipe = sigismember(¤t->pending.signal, SIGPIPE); current 68 fs/autofs/waitq.c spin_lock_irqsave(¤t->sighand->siglock, flags); current 69 fs/autofs/waitq.c sigdelset(¤t->pending.signal, SIGPIPE); current 71 fs/autofs/waitq.c spin_unlock_irqrestore(¤t->sighand->siglock, flags); current 367 fs/autofs/waitq.c pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); current 368 fs/autofs/waitq.c tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp)); current 90 fs/binfmt_aout.c current->mm->arg_start = (unsigned long) p; current 99 fs/binfmt_aout.c current->mm->arg_end = current->mm->env_start = (unsigned long) p; current 108 fs/binfmt_aout.c current->mm->env_end = (unsigned long) p; current 166 fs/binfmt_aout.c current->mm->end_code = ex.a_text + current 167 fs/binfmt_aout.c (current->mm->start_code = N_TXTADDR(ex)); current 168 fs/binfmt_aout.c current->mm->end_data = ex.a_data + current 169 fs/binfmt_aout.c (current->mm->start_data = N_DATADDR(ex)); current 170 fs/binfmt_aout.c current->mm->brk = ex.a_bss + current 171 fs/binfmt_aout.c (current->mm->start_brk = N_BSSADDR(ex)); current 242 fs/binfmt_aout.c retval = set_brk(current->mm->start_brk, current->mm->brk); current 246 fs/binfmt_aout.c current->mm->start_stack = current 252 fs/binfmt_aout.c start_thread(regs, ex.a_entry, current->mm->start_stack); current 117 fs/binfmt_elf.c current->mm->start_brk = current->mm->brk = end; current 229 fs/binfmt_elf.c elf_info = (elf_addr_t *)current->mm->saved_auxv; current 279 fs/binfmt_elf.c sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]); current 302 fs/binfmt_elf.c vma = find_extend_vma(current->mm, bprm->p); current 311 fs/binfmt_elf.c p = current->mm->arg_end = current->mm->arg_start; current 323 fs/binfmt_elf.c current->mm->arg_end = p; current 326 fs/binfmt_elf.c current->mm->env_end = current->mm->env_start = p; current 338 fs/binfmt_elf.c current->mm->env_end = p; current 382 fs/binfmt_elf.c task_pid_nr(current), current->comm, (void *)addr); current 855 fs/binfmt_elf.c current->personality |= READ_IMPLIES_EXEC; current 857 fs/binfmt_elf.c if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current 858 fs/binfmt_elf.c current->flags |= PF_RANDOMIZE; current 960 fs/binfmt_elf.c if (current->flags & PF_RANDOMIZE) current 1103 fs/binfmt_elf.c current->mm->end_code = end_code; current 1104 fs/binfmt_elf.c current->mm->start_code = start_code; current 1105 fs/binfmt_elf.c current->mm->start_data = start_data; current 1106 fs/binfmt_elf.c current->mm->end_data = end_data; current 1107 fs/binfmt_elf.c current->mm->start_stack = bprm->p; current 1109 fs/binfmt_elf.c if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) { current 1119 fs/binfmt_elf.c current->mm->brk = current->mm->start_brk = current 1122 fs/binfmt_elf.c current->mm->brk = current->mm->start_brk = current 1123 fs/binfmt_elf.c arch_randomize_brk(current->mm); current 1125 fs/binfmt_elf.c current->brk_randomized = 1; current 1129 fs/binfmt_elf.c if (current->personality & MMAP_PAGE_ZERO) { current 1590 fs/binfmt_elf.c count = current->mm->map_count; current 1608 fs/binfmt_elf.c for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) { current 1645 fs/binfmt_elf.c n = current->mm->map_count - count; current 1764 fs/binfmt_elf.c struct task_struct *dump_task = current; current 1846 fs/binfmt_elf.c fill_auxv_note(&info->auxv, current->mm); current 2019 fs/binfmt_elf.c for (ct = current->mm->core_state->dumper.next; current 2037 fs/binfmt_elf.c fill_prstatus(info->prstatus, current, siginfo->si_signo); current 2050 fs/binfmt_elf.c fill_psinfo(info->psinfo, current->group_leader, current->mm); current 2055 fs/binfmt_elf.c fill_auxv_note(info->notes + 3, current->mm); current 2064 fs/binfmt_elf.c info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, current 2070 fs/binfmt_elf.c if (elf_core_copy_task_xfpregs(current, info->xfpu)) current 2220 fs/binfmt_elf.c segs = current->mm->map_count; current 2223 fs/binfmt_elf.c gate_vma = get_gate_vma(current->mm); current 2273 fs/binfmt_elf.c for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; current 2302 fs/binfmt_elf.c for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; current 2338 fs/binfmt_elf.c for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; current 200 fs/binfmt_elf_fdpic.c kdebug("____ LOAD %d ____", current->pid); current 353 fs/binfmt_elf_fdpic.c current->personality |= READ_IMPLIES_EXEC; current 359 fs/binfmt_elf_fdpic.c current->mm->start_code = 0; current 360 fs/binfmt_elf_fdpic.c current->mm->end_code = 0; current 361 fs/binfmt_elf_fdpic.c current->mm->start_stack = 0; current 362 fs/binfmt_elf_fdpic.c current->mm->start_data = 0; current 363 fs/binfmt_elf_fdpic.c current->mm->end_data = 0; current 364 fs/binfmt_elf_fdpic.c current->mm->context.exec_fdpic_loadmap = 0; current 365 fs/binfmt_elf_fdpic.c current->mm->context.interp_fdpic_loadmap = 0; current 370 fs/binfmt_elf_fdpic.c ¤t->mm->start_stack, current 371 fs/binfmt_elf_fdpic.c ¤t->mm->start_brk); current 373 fs/binfmt_elf_fdpic.c retval = setup_arg_pages(bprm, current->mm->start_stack, current 385 fs/binfmt_elf_fdpic.c retval = elf_fdpic_map_file(&exec_params, bprm->file, current->mm, current 392 fs/binfmt_elf_fdpic.c current->mm, "interpreter"); current 404 fs/binfmt_elf_fdpic.c if (!current->mm->start_brk) current 405 fs/binfmt_elf_fdpic.c current->mm->start_brk = current->mm->end_data; current 407 fs/binfmt_elf_fdpic.c current->mm->brk = current->mm->start_brk = current 408 fs/binfmt_elf_fdpic.c PAGE_ALIGN(current->mm->start_brk); current 421 fs/binfmt_elf_fdpic.c current->mm->start_brk = vm_mmap(NULL, 0, stack_size, stack_prot, current 426 fs/binfmt_elf_fdpic.c if (IS_ERR_VALUE(current->mm->start_brk)) { current 427 fs/binfmt_elf_fdpic.c retval = current->mm->start_brk; current 428 fs/binfmt_elf_fdpic.c current->mm->start_brk = 0; current 432 fs/binfmt_elf_fdpic.c current->mm->brk = current->mm->start_brk; current 433 fs/binfmt_elf_fdpic.c current->mm->context.end_brk = current->mm->start_brk; current 434 fs/binfmt_elf_fdpic.c current->mm->start_stack = current->mm->start_brk + stack_size; current 438 fs/binfmt_elf_fdpic.c if (create_elf_fdpic_tables(bprm, current->mm, current 442 fs/binfmt_elf_fdpic.c kdebug("- start_code %lx", current->mm->start_code); current 443 fs/binfmt_elf_fdpic.c kdebug("- end_code %lx", current->mm->end_code); current 444 fs/binfmt_elf_fdpic.c kdebug("- start_data %lx", current->mm->start_data); current 445 fs/binfmt_elf_fdpic.c kdebug("- end_data %lx", current->mm->end_data); current 446 fs/binfmt_elf_fdpic.c kdebug("- start_brk %lx", current->mm->start_brk); current 447 fs/binfmt_elf_fdpic.c kdebug("- brk %lx", current->mm->brk); current 448 fs/binfmt_elf_fdpic.c kdebug("- start_stack %lx", current->mm->start_stack); current 465 fs/binfmt_elf_fdpic.c start_thread(regs, entryaddr, current->mm->start_stack); current 570 fs/binfmt_elf_fdpic.c current->mm->context.exec_fdpic_loadmap = (unsigned long) sp; current 583 fs/binfmt_elf_fdpic.c current->mm->context.interp_fdpic_loadmap = (unsigned long) sp; current 684 fs/binfmt_elf_fdpic.c current->mm->arg_start = bprm->p; current 686 fs/binfmt_elf_fdpic.c current->mm->arg_start = current->mm->start_stack - current 690 fs/binfmt_elf_fdpic.c p = (char __user *) current->mm->arg_start; current 699 fs/binfmt_elf_fdpic.c current->mm->arg_end = (unsigned long) p; current 702 fs/binfmt_elf_fdpic.c current->mm->env_start = (unsigned long) p; current 711 fs/binfmt_elf_fdpic.c current->mm->env_end = (unsigned long) p; current 1497 fs/binfmt_elf_fdpic.c for (vma = current->mm->mmap; vma; vma = vma->vm_next) { current 1535 fs/binfmt_elf_fdpic.c for (vma = current->mm->mmap; vma; vma = vma->vm_next) current 1612 fs/binfmt_elf_fdpic.c for (ct = current->mm->core_state->dumper.next; current 1632 fs/binfmt_elf_fdpic.c fill_prstatus(prstatus, current, cprm->siginfo->si_signo); current 1635 fs/binfmt_elf_fdpic.c segs = current->mm->map_count; current 1656 fs/binfmt_elf_fdpic.c fill_psinfo(psinfo, current->group_leader, current->mm); current 1661 fs/binfmt_elf_fdpic.c auxv = (elf_addr_t *) current->mm->saved_auxv; current 1672 fs/binfmt_elf_fdpic.c elf_core_copy_task_fpregs(current, cprm->regs, fpu))) current 1676 fs/binfmt_elf_fdpic.c if (elf_core_copy_task_xfpregs(current, xfpu)) current 1727 fs/binfmt_elf_fdpic.c for (vma = current->mm->mmap; vma; vma = vma->vm_next) { current 112 fs/binfmt_flat.c current->comm, current->pid, cprm->siginfo->si_signo); current 130 fs/binfmt_flat.c sp = (unsigned long __user *)current->mm->start_stack; current 138 fs/binfmt_flat.c current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN; current 139 fs/binfmt_flat.c sp = (unsigned long __user *)current->mm->start_stack; current 150 fs/binfmt_flat.c current->mm->arg_start = (unsigned long)p; current 159 fs/binfmt_flat.c current->mm->arg_end = (unsigned long)p; current 161 fs/binfmt_flat.c current->mm->env_start = (unsigned long) p; current 170 fs/binfmt_flat.c current->mm->env_end = (unsigned long)p; current 375 fs/binfmt_flat.c pr_cont(", killing %s!\n", current->comm); current 376 fs/binfmt_flat.c send_sig(SIGSEGV, current, 0); current 393 fs/binfmt_flat.c ptr = (unsigned long __user *)(current->mm->start_code + r.reloc.offset); current 395 fs/binfmt_flat.c ptr = (unsigned long __user *)(current->mm->start_data + r.reloc.offset); current 405 fs/binfmt_flat.c val += current->mm->start_code; current 408 fs/binfmt_flat.c val += current->mm->start_data; current 411 fs/binfmt_flat.c val += current->mm->end_data; current 719 fs/binfmt_flat.c current->mm->start_code = start_code; current 720 fs/binfmt_flat.c current->mm->end_code = end_code; current 721 fs/binfmt_flat.c current->mm->start_data = datapos; current 722 fs/binfmt_flat.c current->mm->end_data = datapos + data_len; current 730 fs/binfmt_flat.c current->mm->start_brk = datapos + data_len + bss_len; current 731 fs/binfmt_flat.c current->mm->brk = (current->mm->start_brk + 3) & ~3; current 733 fs/binfmt_flat.c current->mm->context.end_brk = memp + memp_size - stack_len; current 976 fs/binfmt_flat.c current->mm->start_stack = current 977 fs/binfmt_flat.c ((current->mm->context.end_brk + stack_len + 3) & ~3) - 4; current 978 fs/binfmt_flat.c pr_debug("sp=%lx\n", current->mm->start_stack); current 981 fs/binfmt_flat.c res = transfer_args_to_stack(bprm, ¤t->mm->start_stack); current 983 fs/binfmt_flat.c res = create_flat_tables(bprm, current->mm->start_stack); current 999 fs/binfmt_flat.c current->mm->start_stack -= sizeof(unsigned long); current 1000 fs/binfmt_flat.c sp = (unsigned long __user *)current->mm->start_stack; current 1013 fs/binfmt_flat.c regs, start_addr, current->mm->start_stack); current 1014 fs/binfmt_flat.c start_thread(regs, start_addr, current->mm->start_stack); current 232 fs/block_dev.c bio.bi_private = current; current 362 fs/block_dev.c dio->waiter = current; current 3410 fs/btrfs/ctree.h return signal_pending(current); current 27 fs/btrfs/delalloc-space.c ASSERT(current->journal_info); current 325 fs/btrfs/delalloc-space.c if (current->journal_info) current 307 fs/btrfs/disk-io.c bool need_lock = (current->journal_info == BTRFS_SEND_TRANS_STUB); current 4174 fs/btrfs/disk-io.c if (current->flags & PF_MEMALLOC) current 3704 fs/btrfs/extent-tree.c trans = current->journal_info; current 4436 fs/btrfs/extent-tree.c if (buf->lock_owner == current->pid) { current 4439 fs/btrfs/extent-tree.c buf->start, btrfs_header_owner(buf), current->pid); current 5623 fs/btrfs/extent-tree.c if (fatal_signal_pending(current)) { current 1606 fs/btrfs/file.c nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied); current 1934 fs/btrfs/file.c current->backing_dev_info = inode_to_bdi(inode); current 2006 fs/btrfs/file.c current->backing_dev_info = NULL; current 3269 fs/btrfs/free-space-cache.c if (fatal_signal_pending(current)) { current 3350 fs/btrfs/free-space-cache.c if (fatal_signal_pending(current)) { current 7842 fs/btrfs/inode.c current->journal_info = dio_data; current 7865 fs/btrfs/inode.c if (current->journal_info) { current 7871 fs/btrfs/inode.c dio_data = current->journal_info; current 7872 fs/btrfs/inode.c current->journal_info = NULL; current 7951 fs/btrfs/inode.c current->journal_info = dio_data; current 8646 fs/btrfs/inode.c struct btrfs_dio_data *dio_data = current->journal_info; current 8793 fs/btrfs/inode.c current->journal_info = &dio_data; current 8808 fs/btrfs/inode.c current->journal_info = NULL; current 8867 fs/btrfs/inode.c if (current->flags & PF_MEMALLOC) { current 3723 fs/btrfs/ioctl.c if (fatal_signal_pending(current)) { current 97 fs/btrfs/locking.c if (eb->lock_nested && current->pid == eb->lock_owner) current 113 fs/btrfs/locking.c if (eb->lock_nested && current->pid == eb->lock_owner) current 136 fs/btrfs/locking.c current->pid == eb->lock_owner); current 137 fs/btrfs/locking.c if (eb->blocking_writers && current->pid == eb->lock_owner) { current 220 fs/btrfs/locking.c eb->lock_owner = current->pid; current 237 fs/btrfs/locking.c if (eb->lock_nested && current->pid == eb->lock_owner) { current 259 fs/btrfs/locking.c if (eb->lock_nested && current->pid == eb->lock_owner) { current 282 fs/btrfs/locking.c WARN_ON(eb->lock_owner == current->pid); current 293 fs/btrfs/locking.c eb->lock_owner = current->pid; current 162 fs/btrfs/print-tree.c eb->lock_owner, current->pid); current 759 fs/btrfs/reada.c old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current), current 760 fs/btrfs/reada.c task_nice_ioprio(current)); current 761 fs/btrfs/reada.c set_task_ioprio(current, BTRFS_IOPRIO_READA); current 763 fs/btrfs/reada.c set_task_ioprio(current, old_ioprio); current 7288 fs/btrfs/send.c current->journal_info = BTRFS_SEND_TRANS_STUB; current 7290 fs/btrfs/send.c current->journal_info = NULL; current 332 fs/btrfs/space-info.c if (!current->journal_info) current 372 fs/btrfs/space-info.c trans = (struct btrfs_trans_handle *)current->journal_info; current 470 fs/btrfs/space-info.c trans = (struct btrfs_trans_handle *)current->journal_info; current 1004 fs/btrfs/space-info.c ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL); current 457 fs/btrfs/transaction.c ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB); current 462 fs/btrfs/transaction.c if (current->journal_info) { current 464 fs/btrfs/transaction.c h = current->journal_info; current 579 fs/btrfs/transaction.c current->journal_info = h; current 593 fs/btrfs/transaction.c if (!current->journal_info) current 594 fs/btrfs/transaction.c current->journal_info = h; current 881 fs/btrfs/transaction.c if (current->journal_info == trans) current 882 fs/btrfs/transaction.c current->journal_info = NULL; current 1780 fs/btrfs/transaction.c current->journal_info = ac->newtrans; current 1826 fs/btrfs/transaction.c if (current->journal_info == trans) current 1827 fs/btrfs/transaction.c current->journal_info = NULL; current 1877 fs/btrfs/transaction.c if (current->journal_info == trans) current 1878 fs/btrfs/transaction.c current->journal_info = NULL; current 2332 fs/btrfs/transaction.c if (current->journal_info == trans) current 2333 fs/btrfs/transaction.c current->journal_info = NULL; current 2347 fs/btrfs/transaction.c if (current->journal_info == trans) current 2348 fs/btrfs/transaction.c current->journal_info = NULL; current 154 fs/btrfs/tree-log.c root->log_start_pid = current->pid; current 155 fs/btrfs/tree-log.c } else if (root->log_start_pid != current->pid) { current 171 fs/btrfs/tree-log.c root->log_start_pid = current->pid; current 653 fs/btrfs/volumes.c ioc = current->io_context; current 5531 fs/btrfs/volumes.c preferred_mirror = first + current->pid % num_stripes; current 7599 fs/btrfs/volumes.c current->comm, task_pid_nr(current)); current 2378 fs/buffer.c if (fatal_signal_pending(current)) { current 3312 fs/buffer.c " system call\n", current->comm); current 572 fs/cachefiles/daemon.c get_fs_pwd(current->fs, &path); current 644 fs/cachefiles/daemon.c get_fs_pwd(current->fs, &path); current 280 fs/cachefiles/internal.h printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__) current 23 fs/cachefiles/security.c new = prepare_kernel_cred(current); current 606 fs/ceph/addr.c WARN_ON(!(current->flags & PF_MEMALLOC)); current 2777 fs/ceph/caps.c if (signal_pending(current)) { current 1445 fs/ceph/file.c current->backing_dev_info = inode_to_bdi(inode); current 1599 fs/ceph/file.c current->backing_dev_info = NULL; current 2934 fs/ceph/mds_client.c current->journal_info = req; current 2941 fs/ceph/mds_client.c current->journal_info = NULL; current 747 fs/ceph/super.h .thread = current, \ current 773 fs/ceph/super.h if (ctx->thread == current) { current 808 fs/ceph/xattr.c struct ceph_mds_request *req = current->journal_info; current 863 fs/ceph/xattr.c if (current->journal_info) { current 895 fs/ceph/xattr.c if (current->journal_info && current 1130 fs/ceph/xattr.c if (current->journal_info) { current 169 fs/cifs/cifs_spnego.c sprintf(dp, ";pid=0x%x", current->pid); current 1166 fs/cifs/connect.c current->flags |= PF_MEMALLOC; current 1167 fs/cifs/connect.c cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current)); current 1314 fs/cifs/connect.c while (!signal_pending(current)) { current 2659 fs/cifs/connect.c if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) current 2772 fs/cifs/connect.c cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); current 402 fs/cifs/dir.c current->tgid); current 80 fs/cifs/dns_resolve.c rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len, current 83 fs/cifs/file.c current->comm, current->tgid); current 320 fs/cifs/file.c cfile->pid = current->tgid; current 948 fs/cifs/file.c lock->pid = current->tgid; current 984 fs/cifs/file.c if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid && current 993 fs/cifs/file.c current->tgid == li->pid) || type == li->type)) current 1574 fs/cifs/file.c if (current->tgid != li->pid) current 1957 fs/cifs/file.c if (!any_available && open_file->pid != current->tgid) current 2500 fs/cifs/file.c pid = current->tgid; current 2839 fs/cifs/file.c pid = current->tgid; current 3565 fs/cifs/file.c pid = current->tgid; current 3985 fs/cifs/file.c pid = current->tgid; current 4313 fs/cifs/file.c pid = current->tgid; current 496 fs/cifs/inode.c io_parms.pid = current->tgid; current 1252 fs/cifs/inode.c current->tgid); current 1273 fs/cifs/inode.c current->tgid); current 1310 fs/cifs/inode.c current->tgid)) current 1954 fs/cifs/inode.c if (signal_pending_state(mode, current)) current 335 fs/cifs/link.c io_parms.pid = current->tgid; current 375 fs/cifs/link.c io_parms.pid = current->tgid; current 436 fs/cifs/link.c io_parms.pid = current->tgid; current 490 fs/cifs/link.c io_parms.pid = current->tgid; current 263 fs/cifs/misc.c buffer->Pid = cpu_to_le16((__u16)current->tgid); current 264 fs/cifs/misc.c buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16)); current 812 fs/cifs/smb1ops.c netpid = current->tgid; current 870 fs/cifs/smb1ops.c return CIFSSMBLock(0, tcon, fid->netfid, current->tgid, 0, 0, 0, 0, current 912 fs/cifs/smb1ops.c current->tgid, length, offset, unlock, lock, current 1121 fs/cifs/smb1ops.c io_parms.pid = current->tgid; current 154 fs/cifs/smb2file.c if (current->tgid != li->pid) current 178 fs/cifs/smb2file.c current->tgid, num, buf); current 201 fs/cifs/smb2file.c cfile->fid.volatile_fid, current->tgid, current 235 fs/cifs/smb2file.c current->tgid, num, buf); current 247 fs/cifs/smb2file.c current->tgid, num, buf); current 176 fs/cifs/smb2inode.c COMPOUND_FID, current->tgid, current 194 fs/cifs/smb2inode.c COMPOUND_FID, current->tgid, current 215 fs/cifs/smb2inode.c cfile->fid.volatile_fid, current->tgid, current 221 fs/cifs/smb2inode.c COMPOUND_FID, current->tgid, current 257 fs/cifs/smb2inode.c current->tgid, FILE_RENAME_INFORMATION, current 262 fs/cifs/smb2inode.c current->tgid, FILE_RENAME_INFORMATION, current 292 fs/cifs/smb2inode.c COMPOUND_FID, current->tgid, current 1197 fs/cifs/smb2ops.c COMPOUND_FID, current->tgid, current 1521 fs/cifs/smb2ops.c current->tgid, current 2389 fs/cifs/smb2ops.c current->tgid, length, offset, type, wait); current 4448 fs/cifs/smb2ops.c io_parms.pid = current->tgid; current 121 fs/cifs/smb2pdu.c shdr->ProcessId = cpu_to_le32((__u16)current->tgid); current 4482 fs/cifs/smb2pdu.c current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag, current 4492 fs/cifs/smb2pdu.c current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, current 593 fs/cifs/smb2transport.c temp->pid = current->pid; current 602 fs/cifs/smb2transport.c get_task_struct(current); current 603 fs/cifs/smb2transport.c temp->creator = current; current 605 fs/cifs/smb2transport.c temp->callback_data = current; current 67 fs/cifs/transport.c temp->pid = current->pid; current 79 fs/cifs/transport.c get_task_struct(current); current 80 fs/cifs/transport.c temp->creator = current; current 82 fs/cifs/transport.c temp->callback_data = current; current 342 fs/cifs/transport.c if (signal_pending(current)) { current 432 fs/cifs/transport.c if (signal_pending(current) && (total_len != send_length)) { current 155 fs/coda/inode.c if (task_active_pid_ns(current) != &init_pid_ns) current 211 fs/coda/psdev.c DECLARE_WAITQUEUE(wait, current); current 229 fs/coda/psdev.c if (signal_pending(current)) { current 277 fs/coda/psdev.c if (task_active_pid_ns(current) != &init_pid_ns) current 54 fs/coda/upcall.c inp->ih.pid = task_pid_nr_ns(current, &init_pid_ns); current 55 fs/coda/upcall.c inp->ih.pgid = task_pgrp_nr_ns(current, &init_pid_ns); current 618 fs/coda/upcall.c spin_lock_irq(¤t->sighand->siglock); current 619 fs/coda/upcall.c *old = current->blocked; current 621 fs/coda/upcall.c sigfillset(¤t->blocked); current 622 fs/coda/upcall.c sigdelset(¤t->blocked, SIGKILL); current 623 fs/coda/upcall.c sigdelset(¤t->blocked, SIGSTOP); current 624 fs/coda/upcall.c sigdelset(¤t->blocked, SIGINT); current 627 fs/coda/upcall.c spin_unlock_irq(¤t->sighand->siglock); current 632 fs/coda/upcall.c spin_lock_irq(¤t->sighand->siglock); current 633 fs/coda/upcall.c current->blocked = *old; current 635 fs/coda/upcall.c spin_unlock_irq(¤t->sighand->siglock); current 654 fs/coda/upcall.c DECLARE_WAITQUEUE(wait, current); current 680 fs/coda/upcall.c if (signal_pending(current)) { current 774 fs/coda/upcall.c if ((req->uc_flags & CODA_REQ_ABORT) || !signal_pending(current)) { current 162 fs/coredump.c exe_file = get_mm_exe_file(current->mm); current 164 fs/coredump.c return cn_esc_printf(cn, "%s (path unknown)", current->comm); current 253 fs/coredump.c task_tgid_vnr(current)); current 258 fs/coredump.c task_tgid_nr(current)); current 262 fs/coredump.c task_pid_vnr(current)); current 266 fs/coredump.c task_pid_nr(current)); current 306 fs/coredump.c err = cn_esc_printf(cn, "%s", current->comm); current 333 fs/coredump.c err = cn_printf(cn, ".%d", task_tgid_vnr(current)); current 352 fs/coredump.c if (t != current && t->mm) { current 440 fs/coredump.c struct task_struct *tsk = current; current 481 fs/coredump.c spin_lock_irq(¤t->sighand->siglock); current 482 fs/coredump.c if (core_dumped && !__fatal_signal_pending(current)) current 483 fs/coredump.c current->signal->group_exit_code |= 0x80; current 484 fs/coredump.c current->signal->group_exit_task = NULL; current 485 fs/coredump.c current->signal->flags = SIGNAL_GROUP_EXIT; current 486 fs/coredump.c spin_unlock_irq(¤t->sighand->siglock); current 512 fs/coredump.c return signal_pending(current); current 562 fs/coredump.c current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; current 571 fs/coredump.c struct mm_struct *mm = current->mm; current 657 fs/coredump.c task_tgid_vnr(current), current->comm); current 666 fs/coredump.c task_tgid_vnr(current), current->comm); current 707 fs/coredump.c task_tgid_vnr(current), current->comm); current 255 fs/crypto/policy.c current->comm, current->pid); current 280 fs/d_path.c get_fs_root_rcu(current->fs, &root); current 435 fs/d_path.c get_fs_root_and_pwd_rcu(current->fs, &root, &pwd); current 1136 fs/dax.c if (fatal_signal_pending(current)) { current 2501 fs/dcache.c DECLARE_WAITQUEUE(wait, current); current 265 fs/devpts/inode.c (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns); current 239 fs/direct-io.c pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid, current 240 fs/direct-io.c current->comm); current 518 fs/direct-io.c dio->waiter = current; current 2918 fs/dlm/lock.c lkb->lkb_ownpid = (int) current->pid; current 5936 fs/dlm/lock.c lkb->lkb_ownpid = (int) current->pid; current 6297 fs/dlm/lock.c if (pid == current->pid) current 467 fs/dlm/user.c current->comm, current 468 fs/dlm/user.c task_pid_nr(current), current 781 fs/dlm/user.c DECLARE_WAITQUEUE(wait, current); current 820 fs/dlm/user.c if (list_empty(&proc->asts) && !signal_pending(current)) { current 829 fs/dlm/user.c if (signal_pending(current)) { current 70 fs/drop_caches.c current->comm, task_pid_nr(current), current 55 fs/ecryptfs/messaging.c (*msg_ctx)->task = current; current 119 fs/ecryptfs/read_write.c if (fatal_signal_pending(current)) { current 225 fs/eventfd.c DECLARE_WAITQUEUE(wait, current); current 242 fs/eventfd.c if (signal_pending(current)) { current 272 fs/eventfd.c DECLARE_WAITQUEUE(wait, current); current 292 fs/eventfd.c if (signal_pending(current)) { current 1416 fs/eventpoll.c current); current 1449 fs/eventpoll.c current_file, current); current 1903 fs/eventpoll.c if (fatal_signal_pending(current)) { current 1911 fs/eventpoll.c if (signal_pending(current)) { current 1982 fs/eventpoll.c ep_tovisit, current); current 2021 fs/eventpoll.c ep_loop_check_proc, file, ep, current); current 185 fs/exec.c struct mm_struct *mm = current->mm; current 217 fs/exec.c ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags, current 370 fs/exec.c task_lock(current->group_leader); current 371 fs/exec.c bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK]; current 372 fs/exec.c task_unlock(current->group_leader); current 443 fs/exec.c if (fatal_signal_pending(current)) current 538 fs/exec.c if (fatal_signal_pending(current)) { current 697 fs/exec.c struct mm_struct *mm = current->mm; current 793 fs/exec.c current->mm->start_stack = bprm->p; current 1016 fs/exec.c tsk = current; current 1017 fs/exec.c old_mm = current->mm; current 1266 fs/exec.c retval = de_thread(current); current 1296 fs/exec.c current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | current 1299 fs/exec.c current->personality &= ~bprm->per_clear; current 1307 fs/exec.c do_close_on_exec(current->files); current 1347 fs/exec.c current->pdeath_signal = 0; current 1360 fs/exec.c arch_pick_mmap_layout(current->mm, &bprm->rlim_stack); current 1362 fs/exec.c current->sas_ss_sp = current->sas_ss_size = 0; current 1372 fs/exec.c set_dumpable(current->mm, suid_dumpable); current 1374 fs/exec.c set_dumpable(current->mm, SUID_DUMP_USER); current 1378 fs/exec.c __set_task_comm(current, kbasename(bprm->filename), true); current 1384 fs/exec.c current->mm->task_size = TASK_SIZE; current 1388 fs/exec.c WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1); current 1389 fs/exec.c flush_signal_handlers(current, 0); current 1397 fs/exec.c task_lock(current->group_leader); current 1398 fs/exec.c current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack; current 1399 fs/exec.c task_unlock(current->group_leader); current 1411 fs/exec.c if (mutex_lock_interruptible(¤t->signal->cred_guard_mutex)) current 1418 fs/exec.c mutex_unlock(¤t->signal->cred_guard_mutex); current 1426 fs/exec.c mutex_unlock(¤t->signal->cred_guard_mutex); current 1467 fs/exec.c if (get_dumpable(current->mm) != SUID_DUMP_USER) current 1468 fs/exec.c perf_event_exit_task(current); current 1475 fs/exec.c mutex_unlock(¤t->signal->cred_guard_mutex); current 1486 fs/exec.c struct task_struct *p = current, *t; current 1496 fs/exec.c if (task_no_new_privs(current)) current 1535 fs/exec.c if (task_no_new_privs(current)) current 1699 fs/exec.c old_pid = current->pid; current 1701 fs/exec.c old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); current 1707 fs/exec.c trace_sched_process_exec(current, old_pid, bprm); current 1709 fs/exec.c proc_exec_connector(current); current 1737 fs/exec.c if ((current->flags & PF_NPROC_EXCEEDED) && current 1745 fs/exec.c current->flags &= ~PF_NPROC_EXCEEDED; current 1761 fs/exec.c current->in_execve = 1; current 1791 fs/exec.c if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt))) current 1827 fs/exec.c current->fs->in_exec = 0; current 1828 fs/exec.c current->in_execve = 0; current 1829 fs/exec.c rseq_execve(current); current 1830 fs/exec.c acct_update_integrals(current); current 1831 fs/exec.c task_numa_free(current, false); current 1847 fs/exec.c current->fs->in_exec = 0; current 1848 fs/exec.c current->in_execve = 0; current 1934 fs/exec.c struct mm_struct *mm = current->mm; current 316 fs/ext2/inode.c colour = (current->pid % 16) * current 904 fs/ext4/balloc.c colour = (current->pid % 16) * current 907 fs/ext4/balloc.c colour = (current->pid % 16) * ((last_block - bg_start) / 16); current 160 fs/ext4/dir.c if (fatal_signal_pending(current)) { current 517 fs/ext4/dir.c inode->i_ino, current->comm); current 13 fs/ext4/ext4_jbd2.c handle_t *handle = current->journal_info; current 21 fs/ext4/ext4_jbd2.c current->journal_info = handle; current 36 fs/ext4/ext4_jbd2.c current->journal_info = handle; current 95 fs/ext4/fsmap.c if (fatal_signal_pending(current)) current 2168 fs/ext4/inode.c if ((current->flags & PF_MEMALLOC) || current 2175 fs/ext4/inode.c WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) current 4852 fs/ext4/inode.c ino, current->comm); current 5425 fs/ext4/inode.c if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) || current 3074 fs/ext4/mballoc.c current->pid, ac->ac_g_ex.fe_len); current 5221 fs/ext4/mballoc.c if (fatal_signal_pending(current)) { current 118 fs/ext4/namei.c current->comm, PTR_ERR(bh)); current 1151 fs/ext4/namei.c if (fatal_signal_pending(current)) { current 513 fs/ext4/super.c sb->s_id, function, line, current->comm, &vaf); current 542 fs/ext4/super.c block, current->comm, &vaf); current 547 fs/ext4/super.c current->comm, &vaf); current 582 fs/ext4/super.c block, current->comm, path, &vaf); current 588 fs/ext4/super.c current->comm, path, &vaf); current 762 fs/ext4/super.c function, line, inode->i_ino, current->comm, &vaf); current 1066 fs/f2fs/checkpoint.c F2FS_I(inode)->cp_task = current; current 318 fs/f2fs/data.c if (test_opt(sbi, LFS) && current->plug) current 319 fs/f2fs/data.c blk_finish_plug(current->plug); current 1617 fs/f2fs/data.c if (fatal_signal_pending(current)) current 2492 fs/f2fs/data.c F2FS_I(inode)->cp_task == current ? current 342 fs/f2fs/dir.c F2FS_I(dir)->task = current; current 746 fs/f2fs/dir.c if (current != F2FS_I(dir)->task) { current 1005 fs/f2fs/dir.c if (fatal_signal_pending(current)) { current 1662 fs/f2fs/file.c F2FS_I(inode)->inmem_task == current) current 1897 fs/f2fs/file.c F2FS_I(inode)->inmem_task = current; current 35 fs/f2fs/gc.c kthread_should_stop() || freezing(current) || current 1707 fs/f2fs/segment.c kthread_should_stop() || freezing(current) || current 2805 fs/f2fs/segment.c if (fatal_signal_pending(current)) current 56 fs/f2fs/trace.c pid_t pid = task_pid_nr(current); current 67 fs/f2fs/trace.c if (p == current) current 72 fs/f2fs/trace.c if (radix_tree_insert(&pids, pid, current)) { current 81 fs/f2fs/trace.c pid, current->comm); current 765 fs/fat/fatent.c if (fatal_signal_pending(current)) { current 120 fs/fhandle.c struct fs_struct *fs = current->fs; current 427 fs/file.c struct task_struct *tsk = current; current 540 fs/file.c return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags); current 545 fs/file.c return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags); current 559 fs/file.c struct files_struct *files = current->files; current 613 fs/file.c __fd_install(current->files, fd, file); current 649 fs/file.c struct files_struct *files = current->files; current 711 fs/file.c struct files_struct *files = current->files; current 767 fs/file.c struct files_struct *files = current->files; current 820 fs/file.c struct files_struct *files = current->files; current 833 fs/file.c struct files_struct *files = current->files; current 890 fs/file.c struct files_struct *files = current->files; current 913 fs/file.c struct files_struct *files = current->files; current 951 fs/file.c struct files_struct *files = current->files; current 337 fs/file_table.c struct task_struct *task = current; current 371 fs/file_table.c struct task_struct *task = current; current 262 fs/fs-writeback.c memcg_css = task_get_css(current, memory_cgrp_id); current 1731 fs/fs-writeback.c blk_flush_plug(current); current 2067 fs/fs-writeback.c current->flags |= PF_SWAPWRITE; current 2097 fs/fs-writeback.c current->flags &= ~PF_SWAPWRITE; current 2134 fs/fs-writeback.c if (blk_needs_flush_plug(current)) current 2135 fs/fs-writeback.c blk_schedule_flush_plug(current); current 2208 fs/fs-writeback.c current->comm, task_pid_nr(current), inode->i_ino, current 270 fs/fs_context.c fc->net_ns = get_net(current->nsproxy->net_ns); current 135 fs/fs_struct.c struct fs_struct *fs = current->fs; current 142 fs/fs_struct.c task_lock(current); current 145 fs/fs_struct.c current->fs = new_fs; current 147 fs/fs_struct.c task_unlock(current); current 158 fs/fs_struct.c return current->fs->umask; current 374 fs/fscache/internal.h printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__) current 122 fs/fsopen.c if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN)) current 165 fs/fsopen.c if (!ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN)) current 138 fs/fuse/dev.c req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); current 464 fs/fuse/dev.c req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); current 1566 fs/fuse/dir.c inarg.lock_owner = fuse_lock_owner_id(fc, current->files); current 1290 fs/fuse/file.c current->backing_dev_info = inode_to_bdi(inode); current 1336 fs/fuse/file.c current->backing_dev_info = NULL; current 1457 fs/fuse/file.c fl_owner_t owner = current->files; current 2405 fs/fuse/file.c struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL; current 627 fs/fuse/inode.c fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); current 100 fs/gfs2/aops.c if (current->journal_info) current 190 fs/gfs2/aops.c if (PageChecked(page) || current->journal_info) current 1045 fs/gfs2/bmap.c struct gfs2_trans *tr = current->journal_info; current 1128 fs/gfs2/bmap.c tr = current->journal_info; current 1394 fs/gfs2/bmap.c tr = current->journal_info; current 1454 fs/gfs2/bmap.c if (current->journal_info) current 1551 fs/gfs2/bmap.c if (current->journal_info == NULL) { current 1573 fs/gfs2/bmap.c tr = current->journal_info; current 1621 fs/gfs2/bmap.c if (current->journal_info) { current 1984 fs/gfs2/bmap.c if (current->journal_info == NULL) { current 2004 fs/gfs2/bmap.c if (current->journal_info) { current 2428 fs/gfs2/bmap.c tr = current->journal_info; current 2484 fs/gfs2/bmap.c BUG_ON(!current->journal_info); current 2492 fs/gfs2/bmap.c if (current->journal_info) current 2499 fs/gfs2/bmap.c if (current->journal_info) current 875 fs/gfs2/file.c current->backing_dev_info = inode_to_bdi(inode); current 877 fs/gfs2/file.c current->backing_dev_info = NULL; current 896 fs/gfs2/file.c current->backing_dev_info = inode_to_bdi(inode); current 898 fs/gfs2/file.c current->backing_dev_info = NULL; current 897 fs/gfs2/glock.c gh->gh_owner_pid = get_pid(task_pid(current)); current 922 fs/gfs2/glock.c gh->gh_owner_pid = get_pid(task_pid(current)); current 143 fs/gfs2/glock.h pid = task_pid(current); current 126 fs/gfs2/glops.c WARN_ON_ONCE(current->journal_info); current 127 fs/gfs2/glops.c current->journal_info = &tr; current 1852 fs/gfs2/inode.c if (current->journal_info) current 783 fs/gfs2/lock_dlm.c if (retries++ && signal_pending(current)) { current 45 fs/gfs2/lops.c BUG_ON(!current->journal_info); current 291 fs/gfs2/meta_io.c struct gfs2_trans *tr = current->journal_info; current 318 fs/gfs2/meta_io.c struct gfs2_trans *tr = current->journal_info; current 334 fs/gfs2/meta_io.c struct gfs2_trans *tr = current->journal_info; current 568 fs/gfs2/super.c if (current->journal_info == NULL) { current 892 fs/gfs2/super.c if (signal_pending(current)) current 1010 fs/gfs2/super.c unlikely(current->flags & PF_MEMALLOC) && current 1213 fs/gfs2/super.c if (current->flags & PF_MEMALLOC) current 1264 fs/gfs2/super.c if (WARN_ON_ONCE(current->flags & PF_MEMALLOC)) current 34 fs/gfs2/trans.c BUG_ON(current->journal_info); current 63 fs/gfs2/trans.c current->journal_info = tr; current 88 fs/gfs2/trans.c struct gfs2_trans *tr = current->journal_info; current 92 fs/gfs2/trans.c current->journal_info = NULL; current 152 fs/gfs2/trans.c struct gfs2_trans *tr = current->journal_info; current 193 fs/gfs2/trans.c struct gfs2_trans *tr = current->journal_info; current 245 fs/gfs2/trans.c struct gfs2_trans *tr = current->journal_info; current 256 fs/gfs2/trans.c struct gfs2_trans *tr = current->journal_info; current 507 fs/hpfs/alloc.c if (fatal_signal_pending(current)) current 202 fs/hugetlbfs/inode.c struct mm_struct *mm = current->mm; current 427 fs/hugetlbfs/inode.c vma_init(&pseudo_vma, current->mm); current 578 fs/hugetlbfs/inode.c struct mm_struct *mm = current->mm; current 635 fs/hugetlbfs/inode.c if (signal_pending(current)) { current 1388 fs/hugetlbfs/inode.c task_lock(current); current 1390 fs/hugetlbfs/inode.c current->comm, current->pid); current 1391 fs/hugetlbfs/inode.c task_unlock(current); current 775 fs/inode.c if (current->reclaim_state) current 776 fs/inode.c current->reclaim_state->reclaimed_slab += reap; current 1513 fs/io_uring.c current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize; current 1521 fs/io_uring.c current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; current 2193 fs/io_uring.c struct fs_struct *old_fs_struct = current->fs; current 2213 fs/io_uring.c if (req->fs != current->fs && current->fs != old_fs_struct) { current 2214 fs/io_uring.c task_lock(current); current 2216 fs/io_uring.c current->fs = req->fs; current 2218 fs/io_uring.c current->fs = old_fs_struct; current 2219 fs/io_uring.c task_unlock(current); current 2321 fs/io_uring.c task_lock(current); current 2322 fs/io_uring.c current->fs = old_fs_struct; current 2323 fs/io_uring.c task_unlock(current); current 2556 fs/io_uring.c spin_lock(¤t->fs->lock); current 2557 fs/io_uring.c if (!current->fs->in_exec) { current 2558 fs/io_uring.c req->fs = current->fs; current 2561 fs/io_uring.c spin_unlock(¤t->fs->lock); current 2833 fs/io_uring.c if (signal_pending(current)) current 2834 fs/io_uring.c flush_signals(current); current 2975 fs/io_uring.c .private = current, current 3009 fs/io_uring.c if (signal_pending(current)) { current 3241 fs/io_uring.c mmgrab(current->mm); current 3242 fs/io_uring.c ctx->sqo_mm = current->mm; current 3527 fs/io_uring.c down_read(¤t->mm->mmap_sem); current 3545 fs/io_uring.c up_read(¤t->mm->mmap_sem); current 422 fs/ioctl.c if (fatal_signal_pending(current)) { current 593 fs/iomap/buffered-io.c if (fatal_signal_pending(current)) current 427 fs/iomap/direct-io.c dio->submit.waiter = current; current 183 fs/jbd2/journal.c journal->j_task = current; current 216 fs/jbd2/journal.c if (freezing(current)) { current 542 fs/jbd2/journal.c if (journal->j_running_transaction && !current->journal_info) { current 592 fs/jbd2/journal.c J_ASSERT(!current->journal_info); current 318 fs/jbd2/transaction.c current->comm, blocks, rsv_blocks, current 416 fs/jbd2/transaction.c current->journal_info = handle; current 554 fs/jbd2/transaction.c if (WARN_ON(current->journal_info)) { current 708 fs/jbd2/transaction.c current->journal_info = NULL; current 1782 fs/jbd2/transaction.c pid = current->pid; current 1811 fs/jbd2/transaction.c current->journal_info = NULL; current 1838 fs/jbd2/transaction.c if (handle->h_sync && !(current->flags & PF_MEMALLOC)) current 85 fs/jffs2/background.c c->gc_task = current; current 88 fs/jffs2/background.c set_user_nice(current, 10); current 122 fs/jffs2/background.c while (signal_pending(current) || freezing(current)) { current 80 fs/jffs2/debug.h task_pid_nr(current), __func__, ##__VA_ARGS__) current 84 fs/jffs2/debug.h task_pid_nr(current), __func__, ##__VA_ARGS__) current 88 fs/jffs2/debug.h task_pid_nr(current), __func__, ##__VA_ARGS__) current 92 fs/jffs2/debug.h task_pid_nr(current), __func__, ##__VA_ARGS__) current 174 fs/jffs2/nodemgmt.c DECLARE_WAITQUEUE(wait, current); current 190 fs/jffs2/nodemgmt.c if (signal_pending(current)) current 43 fs/jffs2/os-linux.h DECLARE_WAITQUEUE(__wait, current); \ current 24 fs/jfs/jfs_lock.h DECLARE_WAITQUEUE(__wait, current); \ current 1566 fs/jfs/jfs_logmgr.c DECLARE_WAITQUEUE(__wait, current); current 2334 fs/jfs/jfs_logmgr.c if (freezing(current)) { current 42 fs/jfs/jfs_metapage.c DECLARE_WAITQUEUE(wait, current); current 120 fs/jfs/jfs_txnmgr.c DECLARE_WAITQUEUE(wait, current); current 2783 fs/jfs/jfs_txnmgr.c if (freezing(current)) { current 2787 fs/jfs/jfs_txnmgr.c DECLARE_WAITQUEUE(wq, current); current 2972 fs/jfs/jfs_txnmgr.c if (freezing(current)) { current 25 fs/lockd/procfs.c struct lockd_net *ln = net_generic(current->nsproxy->net_ns, current 52 fs/lockd/procfs.c struct lockd_net *ln = net_generic(current->nsproxy->net_ns, current 162 fs/lockd/svc.c flush_signals(current); current 181 fs/lockd/svc.c flush_signals(current); current 49 fs/lockd/svc4proc.c lock->fl.fl_pid = current->tgid; current 79 fs/lockd/svcproc.c lock->fl.fl_pid = current->tgid; current 496 fs/locks.c fl->fl_pid = current->tgid; current 555 fs/locks.c fl->fl_owner = current->files; current 556 fs/locks.c fl->fl_pid = current->tgid; current 603 fs/locks.c __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0); current 621 fs/locks.c fl->fl_pid = current->tgid; current 1424 fs/locks.c if (fl->fl_owner != current->files && current 1452 fs/locks.c fl.fl_pid = current->tgid; current 1472 fs/locks.c fl.fl_owner = current->files; current 2313 fs/locks.c flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current)); current 2335 fs/locks.c flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current)); current 2540 fs/locks.c spin_lock(¤t->files->file_lock); current 2542 fs/locks.c spin_unlock(¤t->files->file_lock); current 2671 fs/locks.c spin_lock(¤t->files->file_lock); current 2673 fs/locks.c spin_unlock(¤t->files->file_lock); current 2714 fs/locks.c lock.fl_pid = current->tgid; current 81 fs/mpage.c if (bio == NULL && (current->flags & PF_MEMALLOC)) { current 513 fs/namei.c struct nameidata *old = current->nameidata; current 519 fs/namei.c current->nameidata = p; current 524 fs/namei.c struct nameidata *now = current->nameidata, *old = now->saved; current 526 fs/namei.c current->nameidata = old; current 803 fs/namei.c struct fs_struct *fs = current->fs; current 864 fs/namei.c struct nameidata *nd = current->nameidata; current 2204 fs/namei.c struct fs_struct *fs = current->fs; current 2214 fs/namei.c get_fs_pwd(current->fs, &nd->path); current 668 fs/namespace.c struct mnt_namespace *ns = current->nsproxy->mnt_ns; current 776 fs/namespace.c return mnt->mnt_ns == current->nsproxy->mnt_ns; current 1175 fs/namespace.c struct task_struct *task = current; current 1522 fs/namespace.c if (&mnt->mnt == current->fs->root.mnt || current 1564 fs/namespace.c if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { current 1643 fs/namespace.c return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); current 1746 fs/namespace.c return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; current 2045 fs/namespace.c struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; current 2322 fs/namespace.c struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; current 3470 fs/namespace.c ns = alloc_mnt_ns(current->nsproxy->mnt_ns->user_ns, true); current 3641 fs/namespace.c get_fs_root(current->fs, &root); current 3694 fs/namespace.c touch_mnt_namespace(current->nsproxy->mnt_ns); current 3741 fs/namespace.c set_fs_pwd(current->fs, &root); current 3742 fs/namespace.c set_fs_root(current->fs, &root); current 3827 fs/namespace.c ns_root.mnt = ¤t->nsproxy->mnt_ns->root->mnt; current 3833 fs/namespace.c get_fs_root(current->fs, &fs_root); current 3910 fs/namespace.c struct mnt_namespace *ns = current->nsproxy->mnt_ns; current 3966 fs/namespace.c struct fs_struct *fs = current->fs; current 126 fs/nfs/blocklayout/blocklayout.c if (!bio && (current->flags & PF_MEMALLOC)) { current 62 fs/nfs/blocklayout/rpc_pipefs.c DECLARE_WAITQUEUE(wq, current); current 85 fs/nfs/callback.c if (signal_pending(current)) current 86 fs/nfs/callback.c flush_signals(current); current 117 fs/nfs/callback.c if (signal_pending(current)) current 118 fs/nfs/callback.c flush_signals(current); current 626 fs/nfs/file.c current->backing_dev_info = inode_to_bdi(inode); current 628 fs/nfs/file.c current->backing_dev_info = NULL; current 79 fs/nfs/inode.c if (signal_pending_state(mode, current)) current 857 fs/nfs/inode.c l_ctx->lockowner = current->files; current 867 fs/nfs/inode.c if (pos->lockowner != current->files) current 22 fs/nfs/nfs3acl.c struct posix_acl *sentinel = uncached_acl_sentinel(current); current 31 fs/nfs/nfs3acl.c struct posix_acl *sentinel = uncached_acl_sentinel(current); current 41 fs/nfs/nfs3acl.c struct posix_acl *sentinel = uncached_acl_sentinel(current); current 41 fs/nfs/nfs3proc.c } while (!fatal_signal_pending(current)); current 328 fs/nfs/nfs3proc.c data->arg.create.verifier[1] = cpu_to_be32(current->pid); current 409 fs/nfs/nfs4proc.c if (!__fatal_signal_pending(current)) current 419 fs/nfs/nfs4proc.c if (!signal_pending(current)) current 421 fs/nfs/nfs4proc.c return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS; current 1648 fs/nfs/nfs4proc.c if (!signal_pending(current)) { current 3165 fs/nfs/nfs4proc.c [1] = (__u32)current->pid, current 7121 fs/nfs/nfs4proc.c struct nfs4_lock_waiter waiter = { .task = current, current 165 fs/nfs/nfs4super.c if (p->task == current) current 181 fs/nfs/nfs4super.c new->task = current; current 952 fs/nfs/super.c data->net = current->nsproxy->net_ns; current 2315 fs/nfs/super.c data->net = current->nsproxy->net_ns; current 1229 fs/nfs/write.c do_flush |= l_ctx->lockowner != current->files; current 243 fs/nfsd/export.c nfsd_file_cache_purge(current->nsproxy->net_ns); current 50 fs/nfsd/fault_inject.c struct net *net = current->nsproxy->net_ns; current 199 fs/nfsd/nfs4layouts.c fl->fl_pid = current->tgid; current 257 fs/nfsd/nfs4proc.c current->fs->umask = open->op_umask; current 263 fs/nfsd/nfs4proc.c current->fs->umask = 0; current 609 fs/nfsd/nfs4proc.c current->fs->umask = create->cr_umask; current 678 fs/nfsd/nfs4proc.c current->fs->umask = 0; current 4799 fs/nfsd/nfs4state.c fl->fl_pid = current->tgid; current 6539 fs/nfsd/nfs4state.c file_lock->fl_pid = current->tgid; current 6695 fs/nfsd/nfs4state.c file_lock->fl_pid = current->tgid; current 6757 fs/nfsd/nfs4state.c file_lock->fl_pid = current->tgid; current 7017 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7035 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7058 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7085 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7121 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7192 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7225 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7246 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7270 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7323 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7357 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7378 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7401 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7452 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7486 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7508 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7555 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 7577 fs/nfsd/nfs4state.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 157 fs/nfsd/nfsctl.c return exports_net_open(current->nsproxy->net_ns, file); current 1343 fs/nfsd/nfsctl.c struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, current 888 fs/nfsd/nfssvc.c current->fs->umask = 0; current 925 fs/nfsd/nfssvc.c flush_signals(current); current 935 fs/nfsd/vfs.c dprintk("nfsd: write defer %d\n", task_pid_nr(current)); current 937 fs/nfsd/vfs.c dprintk("nfsd: write resume %d\n", task_pid_nr(current)); current 941 fs/nfsd/vfs.c dprintk("nfsd: write sync %d\n", task_pid_nr(current)); current 960 fs/nfsd/vfs.c unsigned int pflags = current->flags; current 972 fs/nfsd/vfs.c current->flags |= PF_LESS_THROTTLE; current 167 fs/nilfs2/nilfs.h struct nilfs_transaction_info *ti = current->journal_info; current 174 fs/nilfs2/nilfs.h struct nilfs_transaction_info *ti = current->journal_info; current 149 fs/nilfs2/segment.c struct nilfs_transaction_info *cur_ti = current->journal_info; current 162 fs/nilfs2/segment.c save = current->journal_info; current 175 fs/nilfs2/segment.c current->journal_info = ti; current 217 fs/nilfs2/segment.c trace_ti = current->journal_info; current 235 fs/nilfs2/segment.c trace_ti = current->journal_info; current 242 fs/nilfs2/segment.c ti = current->journal_info; current 243 fs/nilfs2/segment.c current->journal_info = ti->ti_save; current 263 fs/nilfs2/segment.c struct nilfs_transaction_info *ti = current->journal_info; current 287 fs/nilfs2/segment.c current->journal_info = ti->ti_save; current 299 fs/nilfs2/segment.c struct nilfs_transaction_info *ti = current->journal_info; current 314 fs/nilfs2/segment.c current->journal_info = ti->ti_save; current 334 fs/nilfs2/segment.c struct nilfs_transaction_info *ti = current->journal_info; current 347 fs/nilfs2/segment.c struct nilfs_transaction_info *cur_ti = current->journal_info; current 356 fs/nilfs2/segment.c current->journal_info = ti; current 380 fs/nilfs2/segment.c struct nilfs_transaction_info *ti = current->journal_info; current 387 fs/nilfs2/segment.c current->journal_info = ti->ti_save; current 2171 fs/nilfs2/segment.c init_waitqueue_entry(&wait_req.wq, current); current 2181 fs/nilfs2/segment.c if (!signal_pending(current)) { current 2241 fs/nilfs2/segment.c BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC); current 2538 fs/nilfs2/segment.c sci->sc_timer_task = current; current 2541 fs/nilfs2/segment.c sci->sc_task = current; current 2569 fs/nilfs2/segment.c if (freezing(current)) { current 255 fs/notify/dnotify/dnotify.c fl_owner_t id = current->files; current 357 fs/notify/dnotify/dnotify.c __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0); current 331 fs/notify/fanotify/fanotify.c event->pid = get_pid(task_pid(current)); current 333 fs/notify/fanotify/fanotify.c event->pid = get_pid(task_tgid(current)); current 362 fs/notify/fanotify/fanotify_user.c if (signal_pending(current)) current 824 fs/notify/fanotify/fanotify_user.c group->memcg = get_mem_cgroup_from_mm(current->mm); current 249 fs/notify/inotify/inotify_user.c if (signal_pending(current)) current 645 fs/notify/inotify/inotify_user.c group->memcg = get_mem_cgroup_from_mm(current->mm); current 1903 fs/ntfs/file.c if (fatal_signal_pending(current)) { current 1934 fs/ntfs/file.c current->backing_dev_info = inode_to_bdi(vi); current 1938 fs/ntfs/file.c current->backing_dev_info = NULL; current 7528 fs/ocfs2/alloc.c if (fatal_signal_pending(current)) { current 2113 fs/ocfs2/aops.c dwc->dw_writer_pid = task_pid_nr(current); current 2320 fs/ocfs2/aops.c if (dwc->dw_writer_pid != task_pid_nr(current)) { current 2335 fs/ocfs2/aops.c BUG_ON(dwc->dw_writer_pid != task_pid_nr(current)); current 1207 fs/ocfs2/cluster/heartbeat.c set_user_nice(current, MIN_NICE); current 80 fs/ocfs2/cluster/masklog.c level, current->comm, task_pid_nr(current), current 1028 fs/ocfs2/cluster/tcp.c o2net_init_nst(&nst, msg_type, key, current, target_node); current 1901 fs/ocfs2/dlm/dlmdomain.c if (signal_pending(current)) { current 2121 fs/ocfs2/dlm/dlmdomain.c if (signal_pending(current)) { current 48 fs/ocfs2/dlm/dlmthread.c DECLARE_WAITQUEUE(wait, current); current 432 fs/ocfs2/dlmfs/userdlm.c if (signal_pending(current)) { current 798 fs/ocfs2/dlmglue.c oh->oh_owner_pid = get_pid(task_pid(current)); current 1495 fs/ocfs2/dlmglue.c if (catch_signals && signal_pending(current)) { current 2666 fs/ocfs2/dlmglue.c struct pid *pid = task_pid(current); current 3486 fs/ocfs2/dlmglue.c if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) { current 847 fs/ocfs2/inode.c trace_ocfs2_inode_is_valid_to_delete(current, osb->dc_task, current 865 fs/ocfs2/inode.c if (current == osb->dc_task) current 734 fs/ocfs2/quota_global.c if (current == osb->dc_task) { current 4531 fs/ocfs2/refcounttree.c if (fatal_signal_pending(current)) { current 467 fs/open.c set_fs_pwd(current->fs, &path); current 499 fs/open.c set_fs_pwd(current->fs, &f.file->f_path); current 527 fs/open.c set_fs_root(current->fs, &path); current 1190 fs/open.c int retval = __close_fd(current->files, fd); current 317 fs/orangefs/devorangefs-req.c current->comm); current 340 fs/orangefs/devorangefs-req.c current->comm); current 499 fs/orangefs/devorangefs-req.c current->comm); current 108 fs/orangefs/orangefs-bufmap.c if (signal_pending(current)) current 225 fs/orangefs/orangefs-mod.c current->comm); current 50 fs/orangefs/waitqueue.c current->comm); current 74 fs/orangefs/waitqueue.c op->upcall.tgid = current->tgid; current 75 fs/orangefs/waitqueue.c op->upcall.pid = current->pid; current 84 fs/orangefs/waitqueue.c current->comm, current 85 fs/orangefs/waitqueue.c current->pid); current 120 fs/orangefs/waitqueue.c current->comm); current 235 fs/orangefs/waitqueue.c current->comm); current 155 fs/overlayfs/copy_up.c if (signal_pending_state(TASK_KILLABLE, current)) { current 350 fs/pipe.c if (signal_pending(current)) { current 395 fs/pipe.c send_sig(SIGPIPE, current, 0); current 429 fs/pipe.c send_sig(SIGPIPE, current, 0); current 486 fs/pipe.c if (signal_pending(current)) { current 887 fs/pipe.c if (signal_pending(current)) current 115 fs/posix_acl.c sentinel = uncached_acl_sentinel(current); current 1060 fs/proc/base.c current->comm, task_pid_nr(current), task_pid_nr(task), current 1088 fs/proc/base.c if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE)) current 1107 fs/proc/base.c if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE)) current 1254 fs/proc/base.c if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) { current 1552 fs/proc/base.c if (same_thread_group(current, p)) current 2376 fs/proc/base.c if (p != current) { current 2415 fs/proc/base.c if (p != current) { current 2567 fs/proc/base.c if (current != task) { current 2592 fs/proc/base.c rv = mutex_lock_interruptible(¤t->signal->cred_guard_mutex); current 2599 fs/proc/base.c mutex_unlock(¤t->signal->cred_guard_mutex); current 3379 fs/proc/base.c is_same_tgroup = same_thread_group(current, task); current 308 fs/proc/fd.c if (p && same_thread_group(p, current)) current 320 fs/proc/inode.c get_area = current->mm->get_unmapped_area; current 437 fs/proc/kcore.c append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current, current 24 fs/proc/loadavg.c idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); current 186 fs/proc/root.c ctx->pid_ns = get_pid_ns(task_active_pid_ns(current)); current 16 fs/proc/self.c pid_t tgid = task_tgid_nr_ns(current, ns); current 57 fs/proc/task_nommu.c if (current->fs && current->fs->users > 1) current 58 fs/proc/task_nommu.c sbytes += kobjsize(current->fs); current 60 fs/proc/task_nommu.c bytes += kobjsize(current->fs); current 62 fs/proc/task_nommu.c if (current->files && atomic_read(¤t->files->count) > 1) current 63 fs/proc/task_nommu.c sbytes += kobjsize(current->files); current 65 fs/proc/task_nommu.c bytes += kobjsize(current->files); current 67 fs/proc/task_nommu.c if (current->sighand && refcount_read(¤t->sighand->count) > 1) current 68 fs/proc/task_nommu.c sbytes += kobjsize(current->sighand); current 70 fs/proc/task_nommu.c bytes += kobjsize(current->sighand); current 72 fs/proc/task_nommu.c bytes += kobjsize(current); /* includes kernel stack */ current 16 fs/proc/thread_self.c pid_t tgid = task_tgid_nr_ns(current, ns); current 17 fs/proc/thread_self.c pid_t pid = task_pid_nr_ns(current, ns); current 38 fs/ramfs/file-mmu.c return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); current 464 fs/read_write.c add_rchar(current, ret); current 466 fs/read_write.c inc_syscr(current); current 519 fs/read_write.c add_wchar(current, ret); current 521 fs/read_write.c inc_syscw(current); current 561 fs/read_write.c add_wchar(current, ret); current 563 fs/read_write.c inc_syscw(current); current 1041 fs/read_write.c add_rchar(current, ret); current 1042 fs/read_write.c inc_syscr(current); current 1065 fs/read_write.c add_wchar(current, ret); current 1066 fs/read_write.c inc_syscw(current); current 1094 fs/read_write.c add_rchar(current, ret); current 1095 fs/read_write.c inc_syscr(current); current 1117 fs/read_write.c add_wchar(current, ret); current 1118 fs/read_write.c inc_syscw(current); current 1190 fs/read_write.c add_rchar(current, ret); current 1191 fs/read_write.c inc_syscr(current); current 1300 fs/read_write.c add_wchar(current, ret); current 1301 fs/read_write.c inc_syscw(current); current 1468 fs/read_write.c add_rchar(current, retval); current 1469 fs/read_write.c add_wchar(current, retval); current 1479 fs/read_write.c inc_syscr(current); current 1480 fs/read_write.c inc_syscw(current); current 1676 fs/read_write.c add_rchar(current, ret); current 1678 fs/read_write.c add_wchar(current, ret); current 1681 fs/read_write.c inc_syscr(current); current 1682 fs/read_write.c inc_syscw(current); current 2218 fs/read_write.c if (fatal_signal_pending(current)) current 241 fs/readdir.c if (prev_reclen && signal_pending(current)) current 326 fs/readdir.c if (prev_reclen && signal_pending(current)) current 499 fs/readdir.c if (signal_pending(current)) current 1789 fs/reiserfs/inode.c if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) { current 2545 fs/reiserfs/inode.c if (checked && (current->flags & PF_MEMALLOC)) { current 2790 fs/reiserfs/inode.c th = (struct reiserfs_transaction_handle *)current-> current 2799 fs/reiserfs/inode.c struct reiserfs_transaction_handle *th = current->journal_info; current 2849 fs/reiserfs/inode.c th = (struct reiserfs_transaction_handle *)current-> current 2859 fs/reiserfs/inode.c struct reiserfs_transaction_handle *th = current->journal_info; current 2911 fs/reiserfs/inode.c th = current->journal_info; current 3013 fs/reiserfs/inode.c th = current->journal_info; current 2962 fs/reiserfs/journal.c init_waitqueue_entry(&wait, current); current 3148 fs/reiserfs/journal.c th = current->journal_info; current 3185 fs/reiserfs/journal.c struct reiserfs_transaction_handle *cur_th = current->journal_info; current 3199 fs/reiserfs/journal.c struct reiserfs_transaction_handle *cur_th = current->journal_info; current 3213 fs/reiserfs/journal.c struct reiserfs_transaction_handle *cur_th = current->journal_info; current 3236 fs/reiserfs/journal.c th->t_handle_save = current->journal_info; current 3237 fs/reiserfs/journal.c current->journal_info = th; current 3240 fs/reiserfs/journal.c current->journal_info = th; current 3243 fs/reiserfs/journal.c BUG_ON(current->journal_info != th); current 3250 fs/reiserfs/journal.c current->journal_info = th->t_handle_save; current 3377 fs/reiserfs/journal.c if (!current->journal_info && th->t_refcount > 1) current 3389 fs/reiserfs/journal.c current->journal_info; current 3398 fs/reiserfs/journal.c memcpy(current->journal_info, th, sizeof(*th)); current 4006 fs/reiserfs/journal.c current->journal_info = th->t_handle_save; current 4053 fs/reiserfs/journal.c current->journal_info = th; current 4060 fs/reiserfs/journal.c current->journal_info = th->t_handle_save; current 26 fs/reiserfs/lock.c if (sb_i->lock_owner != current) { current 28 fs/reiserfs/lock.c sb_i->lock_owner = current; current 44 fs/reiserfs/lock.c BUG_ON(sb_i->lock_owner != current); current 58 fs/reiserfs/lock.c if (sb_i->lock_owner != current) current 79 fs/reiserfs/lock.c sb_i->lock_owner = current; current 2899 fs/reiserfs/reiserfs.h struct reiserfs_transaction_handle *th = current->journal_info; current 630 fs/reiserfs/stree.c current->comm, repeat_counter, current 2551 fs/reiserfs/super.c if (!current->journal_info) { current 2580 fs/reiserfs/super.c journal_mark_dirty(current->journal_info, bh); current 60 fs/select.c if (task_nice(current) > 0) current 84 fs/select.c if (rt_task(current)) current 90 fs/select.c if (ret < current->timer_slack_ns) current 91 fs/select.c return current->timer_slack_ns; current 124 fs/select.c pwq->polling_task = current; current 308 fs/select.c if (current->personality & STICKY_TIMEOUTS) current 429 fs/select.c fdt = files_fdtable(current->files); current 575 fs/select.c if (retval || timed_out || signal_pending(current)) current 638 fs/select.c fdt = files_fdtable(current->files); current 683 fs/select.c if (signal_pending(current)) current 924 fs/select.c if (signal_pending(current)) current 1064 fs/select.c restart_block = ¤t->restart_block; current 1189 fs/select.c fdt = files_fdtable(current->files); current 1229 fs/select.c if (signal_pending(current)) current 66 fs/signalfd.c poll_wait(file, ¤t->sighand->signalfd_wqh, wait); current 68 fs/signalfd.c spin_lock_irq(¤t->sighand->siglock); current 69 fs/signalfd.c if (next_signal(¤t->pending, &ctx->sigmask) || current 70 fs/signalfd.c next_signal(¤t->signal->shared_pending, current 73 fs/signalfd.c spin_unlock_irq(¤t->sighand->siglock); current 170 fs/signalfd.c DECLARE_WAITQUEUE(wait, current); current 172 fs/signalfd.c spin_lock_irq(¤t->sighand->siglock); current 173 fs/signalfd.c ret = dequeue_signal(current, &ctx->sigmask, info); current 181 fs/signalfd.c spin_unlock_irq(¤t->sighand->siglock); current 185 fs/signalfd.c add_wait_queue(¤t->sighand->signalfd_wqh, &wait); current 188 fs/signalfd.c ret = dequeue_signal(current, &ctx->sigmask, info); current 191 fs/signalfd.c if (signal_pending(current)) { current 195 fs/signalfd.c spin_unlock_irq(¤t->sighand->siglock); current 197 fs/signalfd.c spin_lock_irq(¤t->sighand->siglock); current 199 fs/signalfd.c spin_unlock_irq(¤t->sighand->siglock); current 201 fs/signalfd.c remove_wait_queue(¤t->sighand->signalfd_wqh, &wait); current 301 fs/signalfd.c spin_lock_irq(¤t->sighand->siglock); current 303 fs/signalfd.c spin_unlock_irq(¤t->sighand->siglock); current 305 fs/signalfd.c wake_up(¤t->sighand->signalfd_wqh); current 194 fs/splice.c send_sig(SIGPIPE, current, 0); current 234 fs/splice.c send_sig(SIGPIPE, current, 0); current 543 fs/splice.c if (signal_pending(current)) current 556 fs/splice.c if (signal_pending(current)) current 915 fs/splice.c pipe = current->splice_pipe; current 928 fs/splice.c current->splice_pipe = pipe; current 1076 fs/splice.c send_sig(SIGPIPE, current, 0); current 1083 fs/splice.c if (signal_pending(current)) current 1463 fs/splice.c if (signal_pending(current)) { current 1502 fs/splice.c send_sig(SIGPIPE, current, 0); current 1510 fs/splice.c if (signal_pending(current)) { current 1553 fs/splice.c send_sig(SIGPIPE, current, 0); current 1663 fs/splice.c send_sig(SIGPIPE, current, 0); current 217 fs/stat.c current->comm); current 281 fs/ubifs/commit.c c->bgt_name, current->pid); current 377 fs/ubifs/commit.c dbg_cmt("pid %d goes sleep", current->pid); current 388 fs/ubifs/commit.c dbg_cmt("commit finished, pid %d woke up", current->pid); current 203 fs/ubifs/compress.c current->pid, compr->name, PTR_ERR(compr->cc)); current 567 fs/ubifs/debug.c current->pid, lst->empty_lebs, lst->idx_lebs); current 586 fs/ubifs/debug.c current->pid, bi->data_growth + bi->dd_growth, current 740 fs/ubifs/debug.c pr_err("(pid %d) start dumping LEB properties\n", current->pid); current 753 fs/ubifs/debug.c pr_err("(pid %d) finish dumping LEB properties\n", current->pid); current 761 fs/ubifs/debug.c pr_err("(pid %d) dumping LPT information\n", current->pid); current 800 fs/ubifs/debug.c current->pid, sleb->lnum, offs); current 816 fs/ubifs/debug.c pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum); current 840 fs/ubifs/debug.c pr_err("(pid %d) finish dumping LEB %d\n", current->pid, lnum); current 892 fs/ubifs/debug.c current->pid, cat, heap->cnt); current 900 fs/ubifs/debug.c pr_err("(pid %d) finish dumping heap\n", current->pid); current 908 fs/ubifs/debug.c pr_err("(pid %d) dumping pnode:\n", current->pid); current 927 fs/ubifs/debug.c pr_err("(pid %d) start dumping TNC tree\n", current->pid); current 939 fs/ubifs/debug.c pr_err("(pid %d) finish dumping TNC tree\n", current->pid); current 158 fs/ubifs/debug.h pr_debug("UBIFS DBG " type " (pid %d): " fmt "\n", current->pid, \ current 164 fs/ubifs/debug.h pr_debug("UBIFS DBG " type " (pid %d): " fmt "%s\n", current->pid, \ current 1847 fs/ubifs/lpt_commit.c pr_err("(pid %d) start dumping LEB %d\n", current->pid, lnum); current 1933 fs/ubifs/lpt_commit.c pr_err("(pid %d) finish dumping LEB %d\n", current->pid, lnum); current 1950 fs/ubifs/lpt_commit.c pr_err("(pid %d) start dumping all LPT LEBs\n", current->pid); current 1953 fs/ubifs/lpt_commit.c pr_err("(pid %d) finish dumping all LPT LEBs\n", current->pid); current 34 fs/ubifs/misc.c c->vi.ubi_num, c->vi.vol_id, current->pid, current 53 fs/ubifs/misc.c c->vi.ubi_num, c->vi.vol_id, current->pid, current 2242 fs/ubifs/super.c current->pid, name, (int)PTR_ERR(ubi)); current 2375 fs/ubifs/super.c current->pid, (unsigned int)PAGE_SIZE); current 2399 fs/ubifs/super.c current->pid, err); current 218 fs/userfaultfd.c msg.arg.pagefault.feat.ptid = task_pid_vnr(current); current 372 fs/userfaultfd.c if (current->flags & (PF_EXITING|PF_DUMPCORE)) current 459 fs/userfaultfd.c uwq.wq.private = current; current 495 fs/userfaultfd.c (return_to_userland ? !signal_pending(current) : current 496 fs/userfaultfd.c !fatal_signal_pending(current)))) { current 518 fs/userfaultfd.c (return_to_userland ? signal_pending(current) : current 519 fs/userfaultfd.c fatal_signal_pending(current))) current 528 fs/userfaultfd.c if (signal_pending(current) && current 529 fs/userfaultfd.c !fatal_signal_pending(current)) { current 589 fs/userfaultfd.c if (WARN_ON_ONCE(current->flags & PF_EXITING)) current 593 fs/userfaultfd.c init_waitqueue_entry(&ewq->wq, current); current 607 fs/userfaultfd.c fatal_signal_pending(current)) { current 1039 fs/userfaultfd.c DECLARE_WAITQUEUE(wait, current); current 1130 fs/userfaultfd.c if (signal_pending(current)) { current 1953 fs/userfaultfd.c BUG_ON(!current->mm); current 1972 fs/userfaultfd.c ctx->mm = current->mm; current 98 fs/verity/enable.c if (fatal_signal_pending(current)) current 27 fs/xfs/kmem.c current->comm, current->pid, current 113 fs/xfs/kmem.c current->comm, current->pid, current 134 fs/xfs/kmem.c current->comm, current->pid, current 26 fs/xfs/scrub/common.h if (fatal_signal_pending(current)) { current 107 fs/xfs/scrub/fscounters.c if (fatal_signal_pending(current)) current 199 fs/xfs/scrub/fscounters.c if (fatal_signal_pending(current)) current 1002 fs/xfs/xfs_aops.c if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == current 1010 fs/xfs/xfs_aops.c if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS)) current 421 fs/xfs/xfs_buf.c current->comm, current->pid, current 1122 fs/xfs/xfs_buf.c DECLARE_WAITQUEUE (wait, current); current 122 fs/xfs/xfs_discard.c if (fatal_signal_pending(current)) { current 641 fs/xfs/xfs_file.c current->backing_dev_info = inode_to_bdi(inode); current 679 fs/xfs/xfs_file.c current->backing_dev_info = NULL; current 242 fs/xfs/xfs_fsmap.c if (fatal_signal_pending(current)) current 104 fs/xfs/xfs_linux.h #define current_pid() (current->pid) current 105 fs/xfs/xfs_linux.h #define current_test_flags(f) (current->flags & (f)) current 107 fs/xfs/xfs_linux.h (*(sp) = current->flags, current->flags |= (f)) current 109 fs/xfs/xfs_linux.h (*(sp) = current->flags, current->flags &= ~(f)) current 111 fs/xfs/xfs_linux.h (current->flags = ((current->flags & ~(f)) | (*(sp) & (f)))) current 3675 fs/xfs/xfs_log.c tic->t_task = current; current 547 fs/xfs/xfs_log_priv.h DECLARE_WAITQUEUE(wait, current); current 71 fs/xfs/xfs_pwork.c trace_xfs_pwork_init(mp, nr_threads, current->pid); current 74 fs/xfs/xfs_pwork.c current->pid); current 1163 fs/xfs/xfs_reflink.c if (fatal_signal_pending(current)) { current 73 include/acpi/platform/aclinuxex.h return (acpi_thread_id) (unsigned long)current; current 13 include/drm/drm_os_linux.h #define DRM_CURRENTPID task_pid_nr(current) current 35 include/drm/drm_os_linux.h DECLARE_WAITQUEUE(entry, current); \ current 48 include/drm/drm_os_linux.h if (signal_pending(current)) { \ current 289 include/linux/audit.h return current->audit_context; current 278 include/linux/backing-dev.h memcg_css = task_css(current, memory_cgrp_id); current 288 include/linux/backing-dev.h if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id))) current 316 include/linux/backing-dev.h memcg_css = task_get_css(current, memory_cgrp_id); current 256 include/linux/blk-cgroup.h return task_css(current, io_cgrp_id); current 308 include/linux/blk-cgroup.h css = task_css(current, io_cgrp_id); current 1830 include/linux/blkdev.h if (waiter == current) current 680 include/linux/cgroup.h current->no_cgroup_migration = 1; current 689 include/linux/cgroup.h current->no_cgroup_migration = 0; current 495 include/linux/compat.h struct task_struct *t = current; \ current 107 include/linux/context_tracking.h vtime_guest_enter(current); current 109 include/linux/context_tracking.h current->flags |= PF_VCPU; current 131 include/linux/context_tracking.h vtime_guest_exit(current); current 133 include/linux/context_tracking.h current->flags &= ~PF_VCPU; current 144 include/linux/context_tracking.h vtime_account_system(current); current 145 include/linux/context_tracking.h current->flags |= PF_VCPU; current 152 include/linux/context_tracking.h vtime_account_system(current); current 153 include/linux/context_tracking.h current->flags &= ~PF_VCPU; current 63 include/linux/cpuset.h #define cpuset_current_mems_allowed (current->mems_allowed) current 109 include/linux/cpuset.h return task_spread_page(current); current 114 include/linux/cpuset.h return task_spread_slab(current); current 135 include/linux/cpuset.h return read_seqcount_begin(¤t->mems_allowed_seq); current 149 include/linux/cpuset.h return read_seqcount_retry(¤t->mems_allowed_seq, seq); current 156 include/linux/cpuset.h task_lock(current); current 158 include/linux/cpuset.h write_seqcount_begin(¤t->mems_allowed_seq); current 159 include/linux/cpuset.h current->mems_allowed = nodemask; current 160 include/linux/cpuset.h write_seqcount_end(¤t->mems_allowed_seq); current 162 include/linux/cpuset.h task_unlock(current); current 198 include/linux/cred.h __validate_process_creds(current, __FILE__, __LINE__); \ current 298 include/linux/cred.h rcu_dereference_protected(current->cred, 1) current 307 include/linux/cred.h rcu_dereference_protected(current->real_cred, 1) current 87 include/linux/delayacct.h if (current->delays) current 88 include/linux/delayacct.h current->delays->flags |= flag; current 93 include/linux/delayacct.h if (current->delays) current 94 include/linux/delayacct.h current->delays->flags &= ~flag; current 118 include/linux/delayacct.h if (current->delays) current 146 include/linux/delayacct.h if (current->delays) current 152 include/linux/delayacct.h if (current->delays) current 158 include/linux/delayacct.h if (current->delays) current 164 include/linux/delayacct.h if (current->delays) current 16 include/linux/elf.h set_personality(PER_LINUX | (current->personality & (~PER_MASK))) current 104 include/linux/fdtable.h #define fcheck(fd) fcheck_files(current->files, fd) current 964 include/linux/filter.h proglen, pass, image, current->comm, task_pid_nr(current)); current 58 include/linux/freezer.h if (likely(!freezing(current))) current 65 include/linux/freezer.h if (!(current->flags & PF_NOFREEZE)) current 109 include/linux/freezer.h current->flags |= PF_FREEZER_SKIP; current 121 include/linux/freezer.h current->flags &= ~PF_FREEZER_SKIP; current 134 include/linux/freezer.h current->flags &= ~PF_FREEZER_SKIP; current 816 include/linux/ftrace.h atomic_inc(¤t->tracing_graph_pause); current 821 include/linux/ftrace.h atomic_dec(¤t->tracing_graph_pause); current 38 include/linux/hardirq.h account_irq_enter_time(current); \ current 54 include/linux/hardirq.h account_irq_exit_time(current); \ current 79 include/linux/ioprio.h struct io_context *ioc = current->io_context; current 40 include/linux/irqflags.h current->hardirq_context++; \ current 44 include/linux/irqflags.h current->hardirq_context--; \ current 48 include/linux/irqflags.h current->softirq_context++; \ current 52 include/linux/irqflags.h current->softirq_context--; \ current 1350 include/linux/jbd2.h return current->journal_info; current 237 include/linux/kernel.h # define sched_annotate_sleep() (current->task_state_change = 0) current 248 include/linux/kernel.h # define non_block_start() (current->non_block_count++) current 254 include/linux/kernel.h # define non_block_end() WARN_ON(current->non_block_count-- == 0) current 507 include/linux/kvm_host.h pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) current 509 include/linux/kvm_host.h pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) current 511 include/linux/kvm_host.h pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__) current 513 include/linux/kvm_host.h pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \ current 517 include/linux/kvm_host.h task_tgid_nr(current), ## __VA_ARGS__) current 633 include/linux/lockdep.h WARN_ONCE(debug_locks && !current->lockdep_recursion && \ current 634 include/linux/lockdep.h !current->hardirqs_enabled, \ current 639 include/linux/lockdep.h WARN_ONCE(debug_locks && !current->lockdep_recursion && \ current 640 include/linux/lockdep.h current->hardirqs_enabled, \ current 645 include/linux/lockdep.h WARN_ONCE(debug_locks && !current->lockdep_recursion && \ current 646 include/linux/lockdep.h !current->hardirq_context, \ current 562 include/linux/memcontrol.h WARN_ON(current->in_user_fault); current 563 include/linux/memcontrol.h current->in_user_fault = 1; current 568 include/linux/memcontrol.h WARN_ON(!current->in_user_fault); current 569 include/linux/memcontrol.h current->in_user_fault = 0; current 61 include/linux/oom.h current->signal->oom_flag_origin = true; current 66 include/linux/oom.h current->signal->oom_flag_origin = false; current 134 include/linux/percpu-rwsem.h atomic_long_set(&sem->rw_sem.owner, (long)current); current 48 include/linux/perf_regs.h regs_user->regs = task_pt_regs(current); current 49 include/linux/perf_regs.h regs_user->abi = perf_reg_abi(current); current 15 include/linux/personality.h #define set_personality(pers) (current->personality = (pers)) current 157 include/linux/ptrace.h if (unlikely(ptrace_event_enabled(current, event))) { current 158 include/linux/ptrace.h current->ptrace_message = message; current 162 include/linux/ptrace.h if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED) current 163 include/linux/ptrace.h send_sig(SIGTRAP, current, 0); current 190 include/linux/ptrace.h ns = task_active_pid_ns(rcu_dereference(current->parent)); current 216 include/linux/ptrace.h if (unlikely(ptrace) && current->ptrace) { current 217 include/linux/ptrace.h child->ptrace = current->ptrace; current 218 include/linux/ptrace.h __ptrace_link(child, current->parent, current->ptracer_cred); current 401 include/linux/ptrace.h #define current_pt_regs() task_pt_regs(current) current 410 include/linux/ptrace.h #define signal_pt_regs() task_pt_regs(current) current 63 include/linux/ratelimit.h current->comm, rs->missed); current 53 include/linux/rcupdate.h #define rcu_preempt_depth() (current->rcu_read_lock_nesting) current 161 include/linux/rcupdate.h rcu_tasks_qs(current); \ current 52 include/linux/rcutiny.h rcu_tasks_qs(current); \ current 35 include/linux/rcuwait.h rcu_assign_pointer((w)->task, current); \ current 560 include/linux/sbitmap.h .private = current, \ current 132 include/linux/sched.h current->task_state_change = _THIS_IP_; \ current 133 include/linux/sched.h current->state = (state_value); \ current 139 include/linux/sched.h current->task_state_change = _THIS_IP_; \ current 140 include/linux/sched.h smp_store_mb(current->state, (state_value)); \ current 147 include/linux/sched.h raw_spin_lock_irqsave(¤t->pi_lock, flags); \ current 148 include/linux/sched.h current->task_state_change = _THIS_IP_; \ current 149 include/linux/sched.h current->state = (state_value); \ current 150 include/linux/sched.h raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ current 191 include/linux/sched.h current->state = (state_value) current 194 include/linux/sched.h smp_store_mb(current->state, (state_value)) current 205 include/linux/sched.h raw_spin_lock_irqsave(¤t->pi_lock, flags); \ current 206 include/linux/sched.h current->state = (state_value); \ current 207 include/linux/sched.h raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ current 1488 include/linux/sched.h #define clear_used_math() clear_stopped_child_used_math(current) current 1489 include/linux/sched.h #define set_used_math() set_stopped_child_used_math(current) current 1494 include/linux/sched.h #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) current 1497 include/linux/sched.h do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) current 1501 include/linux/sched.h #define used_math() tsk_used_math(current) current 1506 include/linux/sched.h return (current->flags & PF_NO_SETAFFINITY) && current 1507 include/linux/sched.h (current->nr_cpus_allowed == 1); current 1567 include/linux/sched.h current->flags &= ~flags; current 1568 include/linux/sched.h current->flags |= orig_flags & flags; current 1889 include/linux/sched.h if (current->rseq) current 1897 include/linux/sched.h __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask); current 1927 include/linux/sched.h t->rseq = current->rseq; current 1928 include/linux/sched.h t->rseq_sig = current->rseq_sig; current 1929 include/linux/sched.h t->rseq_event_mask = current->rseq_event_mask; current 182 include/linux/sched/mm.h if (unlikely(current->flags & current 188 include/linux/sched/mm.h if (current->flags & PF_MEMALLOC_NOIO) current 190 include/linux/sched/mm.h else if (current->flags & PF_MEMALLOC_NOFS) current 193 include/linux/sched/mm.h if (current->flags & PF_MEMALLOC_NOCMA) current 225 include/linux/sched/mm.h unsigned int flags = current->flags & PF_MEMALLOC_NOIO; current 226 include/linux/sched/mm.h current->flags |= PF_MEMALLOC_NOIO; current 240 include/linux/sched/mm.h current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; current 256 include/linux/sched/mm.h unsigned int flags = current->flags & PF_MEMALLOC_NOFS; current 257 include/linux/sched/mm.h current->flags |= PF_MEMALLOC_NOFS; current 271 include/linux/sched/mm.h current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags; current 276 include/linux/sched/mm.h unsigned int flags = current->flags & PF_MEMALLOC; current 277 include/linux/sched/mm.h current->flags |= PF_MEMALLOC; current 283 include/linux/sched/mm.h current->flags = (current->flags & ~PF_MEMALLOC) | flags; current 289 include/linux/sched/mm.h unsigned int flags = current->flags & PF_MEMALLOC_NOCMA; current 291 include/linux/sched/mm.h current->flags |= PF_MEMALLOC_NOCMA; current 297 include/linux/sched/mm.h current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags; current 323 include/linux/sched/mm.h WARN_ON_ONCE(current->active_memcg); current 324 include/linux/sched/mm.h current->active_memcg = memcg; current 335 include/linux/sched/mm.h current->active_memcg = NULL; current 367 include/linux/sched/mm.h if (current->mm != mm) current 271 include/linux/sched/signal.h struct task_struct *task = current; current 284 include/linux/sched/signal.h spin_lock_irq(¤t->sighand->siglock); current 285 include/linux/sched/signal.h if (current->jobctl & JOBCTL_STOP_DEQUEUED) current 287 include/linux/sched/signal.h spin_unlock_irq(¤t->sighand->siglock); current 343 include/linux/sched/signal.h set_tsk_thread_flag(current, TIF_SIGPENDING); current 444 include/linux/sched/signal.h current->restore_sigmask = true; current 452 include/linux/sched/signal.h current->restore_sigmask = false; current 456 include/linux/sched/signal.h return current->restore_sigmask; current 464 include/linux/sched/signal.h if (!current->restore_sigmask) current 466 include/linux/sched/signal.h current->restore_sigmask = false; current 474 include/linux/sched/signal.h __set_current_blocked(¤t->saved_sigmask); current 489 include/linux/sched/signal.h sigset_t *res = ¤t->blocked; current 491 include/linux/sched/signal.h res = ¤t->saved_sigmask; current 518 include/linux/sched/signal.h if (current->sas_ss_flags & SS_AUTODISARM) current 522 include/linux/sched/signal.h return sp >= current->sas_ss_sp && current 523 include/linux/sched/signal.h sp - current->sas_ss_sp < current->sas_ss_size; current 525 include/linux/sched/signal.h return sp > current->sas_ss_sp && current 526 include/linux/sched/signal.h sp - current->sas_ss_sp <= current->sas_ss_size; current 532 include/linux/sched/signal.h if (!current->sas_ss_size) current 549 include/linux/sched/signal.h return current->sas_ss_sp; current 551 include/linux/sched/signal.h return current->sas_ss_sp + current->sas_ss_size; current 698 include/linux/sched/signal.h return task_rlimit(current, limit); current 703 include/linux/sched/signal.h return task_rlimit_max(current, limit); current 83 include/linux/sched/task_stack.h void *stack = task_stack_page(current); current 449 include/linux/signal.h struct task_struct *t = current; \ current 22 include/linux/sunrpc/types.h #define signalled() (signal_pending(current)) current 67 include/linux/swait.h .task = current, \ current 37 include/linux/swap.h return current->flags & PF_KSWAPD; current 1374 include/linux/syscalls.h return __close_fd(current->files, fd); current 1397 include/linux/syscalls.h unsigned int old = current->personality; current 13 include/linux/task_io_accounting_ops.h current->ioac.read_bytes += bytes; current 27 include/linux/task_io_accounting_ops.h current->ioac.write_bytes += bytes; current 41 include/linux/task_io_accounting_ops.h current->ioac.cancelled_write_bytes += bytes; current 22 include/linux/thread_info.h #define current_thread_info() ((struct thread_info *)current) current 562 include/linux/trace_events.h tracing_record_cmdline(current); \ current 60 include/linux/tracehook.h int ptrace = current->ptrace; current 65 include/linux/tracehook.h current->ptrace_message = message; current 73 include/linux/tracehook.h if (current->exit_code) { current 74 include/linux/tracehook.h send_sig(current->exit_code, current, 1); current 75 include/linux/tracehook.h current->exit_code = 0; current 78 include/linux/tracehook.h current->ptrace_message = 0; current 79 include/linux/tracehook.h return fatal_signal_pending(current); current 187 include/linux/tracehook.h if (unlikely(current->task_works)) current 191 include/linux/tracehook.h if (unlikely(current->cached_requested_key)) { current 192 include/linux/tracehook.h key_put(current->cached_requested_key); current 193 include/linux/tracehook.h current->cached_requested_key = NULL; current 168 include/linux/uaccess.h current->pagefault_disabled++; current 173 include/linux/uaccess.h current->pagefault_disabled--; current 208 include/linux/uaccess.h return current->pagefault_disabled != 0; current 82 include/linux/utsname.h return ¤t->nsproxy->uts_ns->name; current 508 include/linux/wait.h current->timer_slack_ns, \ current 1133 include/linux/wait.h .private = current, \ current 1142 include/linux/wait.h (wait)->private = current; \ current 42 include/linux/wait_bit.h .private = current, \ current 132 include/linux/ww_mutex.h ctx->task = current; current 39 include/net/busy_poll.h return sk->sk_ll_usec && !signal_pending(current); current 44 include/net/cls_cgroup.h classid = task_cls_classid(current); current 50 include/net/cls_cgroup.h u32 classid = task_cls_state(current)->classid; current 39 include/net/netprio_cgroup.h sock_cgroup_set_prioidx(skcd, task_netprioidx(current)); current 85 include/net/scm.h scm_set_cred(scm, task_tgid(current), current_uid(), current_gid()); current 2261 include/net/sock.h return ¤t->task_frag; current 691 include/net/xfrm.h audit_get_loginuid(current) : current 693 include/net/xfrm.h const unsigned int ses = task_valid ? audit_get_sessionid(current) : current 172 include/trace/events/block.h memcpy(__entry->comm, current->comm, TASK_COMM_LEN); current 244 include/trace/events/block.h memcpy(__entry->comm, current->comm, TASK_COMM_LEN); current 309 include/trace/events/block.h memcpy(__entry->comm, current->comm, TASK_COMM_LEN); current 376 include/trace/events/block.h memcpy(__entry->comm, current->comm, TASK_COMM_LEN); current 405 include/trace/events/block.h memcpy(__entry->comm, current->comm, TASK_COMM_LEN); current 467 include/trace/events/block.h memcpy(__entry->comm, current->comm, TASK_COMM_LEN); current 486 include/trace/events/block.h memcpy(__entry->comm, current->comm, TASK_COMM_LEN); current 539 include/trace/events/block.h memcpy(__entry->comm, current->comm, TASK_COMM_LEN); current 46 include/trace/events/power_cpu_migrate.h __define_cpu_migrate_event(current); current 113 include/trace/events/sched.h BUG_ON(p != current); current 275 include/trace/events/sched.h memcpy(__entry->comm, current->comm, TASK_COMM_LEN); current 277 include/trace/events/sched.h __entry->prio = current->prio; /* XXX SCHED_DEADLINE */ current 31 include/trace/events/syscalls.h syscall_get_arguments(current, regs, __entry->args); current 56 include/trace/events/syscalls.h __entry->id = syscall_get_nr(current, regs); current 677 include/trace/events/writeback.h __entry->dirtied_pause = current->nr_dirtied_pause; current 678 include/trace/events/writeback.h __entry->think = current->dirty_paused_when == 0 ? 0 : current 679 include/trace/events/writeback.h (long)(jiffies - current->dirty_paused_when) * 1000/HZ; current 394 init/do_mounts.c s = current->fs->pwd.dentry->d_sb; current 81 init/do_mounts_initrd.c current->flags |= PF_FREEZER_SKIP; current 89 init/do_mounts_initrd.c current->flags &= ~PF_FREEZER_SKIP; current 882 init/main.c printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current)); current 1175 init/main.c cad_pid = task_pid(current); current 20 ipc/ipc_sysctl.c struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; current 51 ipc/ipc_sysctl.c struct ipc_namespace *ns = current->nsproxy->ipc_ns; current 91 ipc/ipc_sysctl.c struct ipc_namespace *ns = current->nsproxy->ipc_ns; current 97 ipc/ipc_sysctl.c ret = sem_check_semmni(current->nsproxy->ipc_ns); current 16 ipc/mq_sysctl.c struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; current 387 ipc/mqueue.c ctx->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns); current 595 ipc/mqueue.c if (task_tgid(current) == info->notify_owner) current 627 ipc/mqueue.c if (walk->task->prio <= current->prio) { current 665 ipc/mqueue.c if (signal_pending(current)) { current 731 ipc/mqueue.c sig_i.si_pid = task_tgid_nr_ns(current, current 818 ipc/mqueue.c struct vfsmount *mnt = current->nsproxy->ipc_ns->mq_mnt; current 880 ipc/mqueue.c struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; current 1062 ipc/mqueue.c wait.task = current; current 1166 ipc/mqueue.c wait.task = current; current 1313 ipc/mqueue.c if (info->notify_owner == task_tgid(current)) { current 1335 ipc/mqueue.c info->notify_self_exec_id = current->self_exec_id; current 1339 ipc/mqueue.c info->notify_owner = get_pid(task_tgid(current)); current 185 ipc/msg.c mss->tsk = current; current 285 ipc/msg.c ns = current->nsproxy->ipc_ns; current 579 ipc/msg.c ns = current->nsproxy->ipc_ns; current 711 ipc/msg.c ns = current->nsproxy->ipc_ns; current 826 ipc/msg.c ns = current->nsproxy->ipc_ns; current 898 ipc/msg.c if (signal_pending(current)) { current 905 ipc/msg.c ipc_update_pid(&msq->q_lspid, task_tgid(current)); current 1050 ipc/msg.c !security_msg_queue_msgrcv(&msq->q_perm, msg, current, current 1076 ipc/msg.c ns = current->nsproxy->ipc_ns; current 1135 ipc/msg.c ipc_update_pid(&msq->q_lrpid, task_tgid(current)); current 1151 ipc/msg.c msr_d.r_tsk = current; current 1201 ipc/msg.c if (signal_pending(current)) { current 188 ipc/namespace.c exit_sem(current); current 591 ipc/sem.c ns = current->nsproxy->ipc_ns; current 1067 ipc/sem.c current->comm, task_pid_nr(current)); current 1376 ipc/sem.c ipc_update_pid(&curr->sempid, task_tgid(current)); current 1497 ipc/sem.c ipc_update_pid(&sma->sems[i].sempid, task_tgid(current)); current 1643 ipc/sem.c ns = current->nsproxy->ipc_ns; current 1763 ipc/sem.c ns = current->nsproxy->ipc_ns; current 1836 ipc/sem.c undo_list = current->sysvsem.undo_list; current 1845 ipc/sem.c current->sysvsem.undo_list = undo_list; current 1980 ipc/sem.c ns = current->nsproxy->ipc_ns; current 2091 ipc/sem.c queue.pid = task_tgid(current); current 2152 ipc/sem.c queue.sleeper = current; current 2206 ipc/sem.c } while (error == -EINTR && !signal_pending(current)); /* spurious */ current 2397 ipc/sem.c ipc_update_pid(&semaphore->sempid, task_tgid(current)); current 253 ipc/shm.c ipc_update_pid(&shp->shm_lprid, task_tgid(current)); current 340 ipc/shm.c ipc_update_pid(&shp->shm_lprid, task_tgid(current)); current 669 ipc/shm.c shp->shm_cprid = get_pid(task_tgid(current)); current 676 ipc/shm.c shp->shm_creator = current; current 683 ipc/shm.c list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist); current 736 ipc/shm.c ns = current->nsproxy->ipc_ns; current 1149 ipc/shm.c ns = current->nsproxy->ipc_ns; current 1341 ipc/shm.c ns = current->nsproxy->ipc_ns; current 1479 ipc/shm.c ns = current->nsproxy->ipc_ns; current 1547 ipc/shm.c if (down_write_killable(¤t->mm->mmap_sem)) { current 1557 ipc/shm.c if (find_vma_intersection(current->mm, addr, addr + size)) current 1567 ipc/shm.c up_write(¤t->mm->mmap_sem); current 1628 ipc/shm.c struct mm_struct *mm = current->mm; current 872 ipc/util.c iter->ns = get_ipc_ns(current->nsproxy->ipc_ns); current 873 ipc/util.c iter->pid_ns = get_pid_ns(task_active_pid_ns(current)); current 197 kernel/acct.c struct pid_namespace *ns = task_active_pid_ns(current); current 291 kernel/acct.c pin_kill(task_active_pid_ns(current)->bacct); current 417 kernel/acct.c struct pacct_struct *pacct = ¤t->signal->pacct; current 428 kernel/acct.c strlcpy(ac->ac_comm, current->comm, sizeof(ac->ac_comm)); current 432 kernel/acct.c run_time -= current->group_leader->start_time; current 456 kernel/acct.c spin_lock_irq(¤t->sighand->siglock); current 457 kernel/acct.c tty = current->signal->tty; /* Safe as we hold the siglock */ current 466 kernel/acct.c spin_unlock_irq(¤t->sighand->siglock); current 481 kernel/acct.c flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; current 482 kernel/acct.c current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; current 506 kernel/acct.c ac.ac_pid = task_tgid_nr_ns(current, ns); current 508 kernel/acct.c ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), current 524 kernel/acct.c current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; current 535 kernel/acct.c struct pacct_struct *pacct = ¤t->signal->pacct; current 539 kernel/acct.c if (group_dead && current->mm) { current 542 kernel/acct.c down_read(¤t->mm->mmap_sem); current 543 kernel/acct.c vma = current->mm->mmap; current 548 kernel/acct.c up_read(¤t->mm->mmap_sem); current 551 kernel/acct.c spin_lock_irq(¤t->sighand->siglock); current 554 kernel/acct.c if (thread_group_leader(current)) { current 556 kernel/acct.c if (current->flags & PF_FORKNOEXEC) current 559 kernel/acct.c if (current->flags & PF_SUPERPRIV) current 561 kernel/acct.c if (current->flags & PF_DUMPCORE) current 563 kernel/acct.c if (current->flags & PF_SIGNALED) current 566 kernel/acct.c task_cputime(current, &utime, &stime); current 569 kernel/acct.c pacct->ac_minflt += current->min_flt; current 570 kernel/acct.c pacct->ac_majflt += current->maj_flt; current 571 kernel/acct.c spin_unlock_irq(¤t->sighand->siglock); current 600 kernel/acct.c for (ns = task_active_pid_ns(current); ns != NULL; ns = ns->parent) { current 120 kernel/async.c entry->func, task_pid_nr(current)); current 209 kernel/async.c current->flags |= PF_USED_ASYNC; current 293 kernel/async.c pr_debug("async_waiting @ %i\n", task_pid_nr(current)); current 304 kernel/async.c task_pid_nr(current), current 231 kernel/audit.c audit_cmd_mutex.owner = current; current 252 kernel/audit.c return (current == audit_cmd_mutex.owner); current 1024 kernel/audit.c if (task_active_pid_ns(current) != &init_pid_ns) current 1047 kernel/audit.c pid_t pid = task_tgid_nr(current); current 1233 kernel/audit.c struct pid *req_pid = task_tgid(current); current 1449 kernel/audit.c t = READ_ONCE(current->signal->audit_tty); current 1470 kernel/audit.c t = READ_ONCE(current->signal->audit_tty); current 1473 kernel/audit.c t = xchg(¤t->signal->audit_tty, t); current 1769 kernel/audit.c if (!(auditd_test_task(current) || audit_ctl_owner_current())) { current 1780 kernel/audit.c DECLARE_WAITQUEUE(wait, current); current 2047 kernel/audit.c unsigned int sessionid = audit_get_sessionid(current); current 2048 kernel/audit.c uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current)); current 2069 kernel/audit.c security_task_getsecid(current, &sid); current 2114 kernel/audit.c spin_lock_irqsave(¤t->sighand->siglock, flags); current 2115 kernel/audit.c if (current->signal) current 2116 kernel/audit.c tty = tty_kref_get(current->signal->tty); current 2117 kernel/audit.c spin_unlock_irqrestore(¤t->sighand->siglock, flags); current 2129 kernel/audit.c char comm[sizeof(current->comm)]; current 2141 kernel/audit.c task_ppid_nr(current), current 2142 kernel/audit.c task_tgid_nr(current), current 2143 kernel/audit.c from_kuid(&init_user_ns, audit_get_loginuid(current)), current 2153 kernel/audit.c audit_get_sessionid(current)); current 2156 kernel/audit.c audit_log_untrustedstring(ab, get_task_comm(comm, current)); current 2157 kernel/audit.c audit_log_d_path_exe(ab, current->mm); current 2189 kernel/audit.c if (!audit_loginuid_set(current)) current 2219 kernel/audit.c uid = from_kuid(&init_user_ns, task_uid(current)); current 2224 kernel/audit.c audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid); current 2247 kernel/audit.c oldloginuid = audit_get_loginuid(current); current 2248 kernel/audit.c oldsessionid = audit_get_sessionid(current); current 2261 kernel/audit.c current->sessionid = sessionid; current 2262 kernel/audit.c current->loginuid = loginuid; current 2283 kernel/audit.c audit_sig_pid = task_tgid_nr(current); current 2284 kernel/audit.c auid = audit_get_loginuid(current); current 2289 kernel/audit.c security_task_getsecid(current, &audit_sig_sid); current 263 kernel/audit_watch.c audit_filter_inodes(current, audit_context()); current 1338 kernel/auditfilter.c pid = task_pid_nr(current); current 1348 kernel/auditfilter.c result = audit_uid_comparator(audit_get_loginuid(current), current 1352 kernel/auditfilter.c result = audit_comparator(audit_loginuid_set(current), current 1364 kernel/auditfilter.c security_task_getsecid(current, &sid); current 1370 kernel/auditfilter.c result = audit_exe_compare(current, e->rule.exe); current 450 kernel/auditsc.c cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation); current 1002 kernel/auditsc.c const char __user *p = (const char __user *)current->mm->arg_start; current 1057 kernel/auditsc.c send_sig(SIGKILL, current, 0); current 1424 kernel/auditsc.c res = get_cmdline(current, buf, MAX_PROCTITLE_AUDIT_LEN); current 1452 kernel/auditsc.c context->personality = current->personality; current 1597 kernel/auditsc.c if (tsk == current && !context->dummy && context->in_syscall) { current 1646 kernel/auditsc.c if (auditd_test_task(current)) current 1650 kernel/auditsc.c context->arch = syscall_get_arch(current); current 1709 kernel/auditsc.c audit_filter_syscall(current, context, current 1711 kernel/auditsc.c audit_filter_inodes(current, context); current 1893 kernel/auditsc.c get_fs_pwd(current->fs, &context->pwd); current 2487 kernel/auditsc.c context->capset.pid = task_tgid_nr(current); current 2553 kernel/auditsc.c char comm[sizeof(current->comm)]; current 2555 kernel/auditsc.c auid = audit_get_loginuid(current); current 2556 kernel/auditsc.c sessionid = audit_get_sessionid(current); current 2565 kernel/auditsc.c audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current)); current 2566 kernel/auditsc.c audit_log_untrustedstring(ab, get_task_comm(comm, current)); current 2567 kernel/auditsc.c audit_log_d_path_exe(ab, current->mm); current 2616 kernel/auditsc.c signr, syscall_get_arch(current), syscall, current 2617 kernel/auditsc.c in_compat_syscall(), KSTK_EIP(current), code); current 776 kernel/bpf/cgroup.c cgrp = task_dfl_cgroup(current); current 935 kernel/bpf/cgroup.c cgrp = task_dfl_cgroup(current); current 660 kernel/bpf/devmap.c return __dev_map_update_elem(current->nsproxy->net_ns, current 717 kernel/bpf/devmap.c return __dev_map_hash_update_elem(current->nsproxy->net_ns, current 157 kernel/bpf/helpers.c struct task_struct *task = current; current 173 kernel/bpf/helpers.c struct task_struct *task = current; current 193 kernel/bpf/helpers.c struct task_struct *task = current; current 318 kernel/bpf/helpers.c struct cgroup *cgrp = task_dfl_cgroup(current); current 99 kernel/bpf/offload.c offload->netdev = dev_get_by_index(current->nsproxy->net_ns, current 364 kernel/bpf/offload.c struct net *net = current->nsproxy->net_ns; current 307 kernel/bpf/stackmap.c if (!user || !current || !current->mm || irq_work_busy || current 308 kernel/bpf/stackmap.c down_read_trylock(¤t->mm->mmap_sem) == 0) { current 319 kernel/bpf/stackmap.c vma = find_vma(current->mm, ips[i]); current 333 kernel/bpf/stackmap.c up_read(¤t->mm->mmap_sem); current 335 kernel/bpf/stackmap.c work->sem = ¤t->mm->mmap_sem; current 342 kernel/bpf/stackmap.c rwsem_release(¤t->mm->mmap_sem.dep_map, 1, _RET_IP_); current 7616 kernel/bpf/verifier.c if (signal_pending(current)) current 48 kernel/capability.c char name[sizeof(current->comm)]; current 51 kernel/capability.c get_task_comm(name, current)); current 72 kernel/capability.c char name[sizeof(current->comm)]; current 75 kernel/capability.c get_task_comm(name, current)); current 121 kernel/capability.c if (pid && (pid != task_pid_vnr(current))) { current 134 kernel/capability.c ret = security_capget(current, pEp, pIp, pPp); current 239 kernel/capability.c if (pid != 0 && pid != task_pid_vnr(current)) current 377 kernel/capability.c current->flags |= PF_SUPERPRIV; current 286 kernel/cgroup/cgroup-v1.c struct pid_namespace *ns = task_active_pid_ns(current); current 321 kernel/cgroup/cgroup-v1.c l->key.ns = get_pid_ns(task_active_pid_ns(current)); current 1058 kernel/cgroup/cgroup-v1.c task_tgid_nr(current), current->comm); current 1374 kernel/cgroup/cgroup.c cset = current->nsproxy->cgroup_ns->root_cset; current 1856 kernel/cgroup/cgroup.c if (current->nsproxy->cgroup_ns == &init_cgroup_ns) { current 2198 kernel/cgroup/cgroup.c ctx->ns = current->nsproxy->cgroup_ns; current 2847 kernel/cgroup/cgroup.c tsk = current; current 3738 kernel/cgroup/cgroup.c struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; current 4733 kernel/cgroup/cgroup.c struct cgroup_namespace *ns = current->nsproxy->cgroup_ns; current 5199 kernel/cgroup/cgroup.c current->comm, current->pid, ss->name); current 5917 kernel/cgroup/cgroup.c current->nsproxy->cgroup_ns); current 6046 kernel/cgroup/cgroup.c cset = task_css_set(current); current 6406 kernel/cgroup/cgroup.c cset = task_css_set(current); current 1821 kernel/cgroup/cpuset.c ret = task_cs(current) == cpuset_being_rebound; current 2865 kernel/cgroup/cpuset.c set_cpus_allowed_ptr(task, current->cpus_ptr); current 2866 kernel/cgroup/cpuset.c task->mems_allowed = current->mems_allowed; current 3326 kernel/cgroup/cpuset.c nodes_setall(current->mems_allowed); current 3361 kernel/cgroup/cpuset.c return nodes_intersects(*nodemask, current->mems_allowed); current 3425 kernel/cgroup/cpuset.c if (node_isset(node, current->mems_allowed)) current 3431 kernel/cgroup/cpuset.c if (unlikely(tsk_is_oom_victim(current))) current 3436 kernel/cgroup/cpuset.c if (current->flags & PF_EXITING) /* Let dying task have memory */ current 3443 kernel/cgroup/cpuset.c cs = nearest_hardwall_ancestor(task_cs(current)); current 3480 kernel/cgroup/cpuset.c return *rotor = next_node_in(*rotor, current->mems_allowed); current 3485 kernel/cgroup/cpuset.c if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) current 3486 kernel/cgroup/cpuset.c current->cpuset_mem_spread_rotor = current 3487 kernel/cgroup/cpuset.c node_random(¤t->mems_allowed); current 3489 kernel/cgroup/cpuset.c return cpuset_spread_node(¤t->cpuset_mem_spread_rotor); current 3494 kernel/cgroup/cpuset.c if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE) current 3495 kernel/cgroup/cpuset.c current->cpuset_slab_spread_rotor = current 3496 kernel/cgroup/cpuset.c node_random(¤t->mems_allowed); current 3498 kernel/cgroup/cpuset.c return cpuset_spread_node(¤t->cpuset_slab_spread_rotor); current 3532 kernel/cgroup/cpuset.c cgrp = task_cs(current)->css.cgroup; current 3536 kernel/cgroup/cpuset.c nodemask_pr_args(¤t->mems_allowed)); current 3570 kernel/cgroup/cpuset.c fmeter_markevent(&task_cs(current)->fmeter); current 3598 kernel/cgroup/cpuset.c current->nsproxy->cgroup_ns); current 53 kernel/cgroup/debug.c cset = task_css_set(current); current 82 kernel/cgroup/debug.c count = refcount_read(&task_css_set(current)->refcount); current 99 kernel/cgroup/debug.c cset = task_css_set(current); current 111 kernel/cgroup/freezer.c if (current->frozen) current 115 kernel/cgroup/freezer.c current->frozen = true; current 116 kernel/cgroup/freezer.c cgrp = task_dfl_cgroup(current); current 136 kernel/cgroup/freezer.c cgrp = task_dfl_cgroup(current); current 140 kernel/cgroup/freezer.c WARN_ON_ONCE(!current->frozen); current 141 kernel/cgroup/freezer.c current->frozen = false; current 142 kernel/cgroup/freezer.c } else if (!(current->jobctl & JOBCTL_TRAP_FREEZE)) { current 143 kernel/cgroup/freezer.c spin_lock(¤t->sighand->siglock); current 144 kernel/cgroup/freezer.c current->jobctl |= JOBCTL_TRAP_FREEZE; current 146 kernel/cgroup/freezer.c spin_unlock(¤t->sighand->siglock); current 75 kernel/cgroup/namespace.c cset = task_css_set(current); current 223 kernel/cgroup/pids.c css = task_css_check(current, pids_cgrp_id, true); current 243 kernel/cgroup/pids.c css = task_css_check(current, pids_cgrp_id, true); current 79 kernel/cgroup/rdma.c return css_rdmacg(task_get_css(current, rdma_cgrp_id)); current 135 kernel/compat.c old_set = current->blocked.sig[0]; current 142 kernel/compat.c new_blocked = current->blocked; current 65 kernel/context_tracking.c WARN_ON_ONCE(!current->mm); current 81 kernel/context_tracking.c vtime_user_enter(current); current 158 kernel/context_tracking.c vtime_user_exit(current); current 23 kernel/cred.c current->comm, current->pid, ##__VA_ARGS__) current 29 kernel/cred.c current->comm, current->pid, ##__VA_ARGS__); \ current 144 kernel/cred.c BUG_ON(cred == current->cred); current 145 kernel/cred.c BUG_ON(cred == current->real_cred); current 252 kernel/cred.c struct task_struct *task = current; current 436 kernel/cred.c struct task_struct *task = current; current 543 kernel/cred.c const struct cred *old = current->cred; current 565 kernel/cred.c rcu_assign_pointer(current->cred, new); current 584 kernel/cred.c const struct cred *override = current->cred; current 593 kernel/cred.c rcu_assign_pointer(current->cred, old); current 842 kernel/cred.c dump_invalid_creds(cred, "Specified", current); current 289 kernel/debug/debug_core.c if (current->mm) { current 293 kernel/debug/debug_core.c if (!current->vmacache.vmas[i]) current 295 kernel/debug/debug_core.c flush_cache_range(current->vmacache.vmas[i], current 556 kernel/debug/debug_core.c kgdb_info[cpu].task = current; current 675 kernel/debug/debug_core.c kgdb_contthread = current; current 549 kernel/debug/gdbstub.c if (kgdb_usethread && kgdb_usethread != current) { current 622 kernel/debug/gdbstub.c !(!kgdb_usethread || kgdb_usethread == current) || current 746 kernel/debug/gdbstub.c ks->threadid = shadow_pid(current->pid); current 964 kernel/debug/gdbstub.c int_to_threadref(thref, shadow_pid(current->pid)); current 1046 kernel/debug/gdbstub.c if (kgdb_contthread && kgdb_contthread != current) { current 61 kernel/delayacct.c current->delays->blkio_start = ktime_get_ns(); current 154 kernel/delayacct.c current->delays->freepages_start = ktime_get_ns(); current 160 kernel/delayacct.c ¤t->delays->lock, current 161 kernel/delayacct.c ¤t->delays->freepages_start, current 162 kernel/delayacct.c ¤t->delays->freepages_delay, current 163 kernel/delayacct.c ¤t->delays->freepages_count); current 168 kernel/delayacct.c current->delays->thrashing_start = ktime_get_ns(); current 173 kernel/delayacct.c delayacct_end(¤t->delays->lock, current 174 kernel/delayacct.c ¤t->delays->thrashing_start, current 175 kernel/delayacct.c ¤t->delays->thrashing_delay, current 176 kernel/delayacct.c ¤t->delays->thrashing_count); current 1097 kernel/dma/debug.c struct vm_struct *stack_vm_area = task_stack_vm_area(current); current 1114 kernel/dma/debug.c addr = (u8 *)current->stack + i * PAGE_SIZE + offset; current 206 kernel/events/callchain.c if (current->mm) current 207 kernel/events/callchain.c regs = task_pt_regs(current); current 82 kernel/events/core.c if (p != current) current 224 kernel/events/core.c if (ctx->task != current) { current 335 kernel/events/core.c if (WARN_ON_ONCE(task != current)) current 755 kernel/events/core.c cgrp = perf_cgroup_from_task(current, event->ctx); current 970 kernel/events/core.c struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); current 2567 kernel/events/core.c perf_event_sched_in(cpuctx, task_ctx, current); current 2601 kernel/events/core.c reprogram = (ctx->task == current); current 2626 kernel/events/core.c struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); current 2767 kernel/events/core.c ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); current 2776 kernel/events/core.c ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); current 3846 kernel/events/core.c perf_event_sched_in(cpuctx, task_ctx, current); current 3899 kernel/events/core.c ctx = current->perf_event_ctxp[ctxn]; current 3918 kernel/events/core.c ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); current 4048 kernel/events/core.c event->hw.target != current) { current 4208 kernel/events/core.c task = current; current 5298 kernel/events/core.c mutex_lock(¤t->perf_event_mutex); current 5299 kernel/events/core.c list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { current 5304 kernel/events/core.c mutex_unlock(¤t->perf_event_mutex); current 5314 kernel/events/core.c mutex_lock(¤t->perf_event_mutex); current 5315 kernel/events/core.c list_for_each_entry(event, ¤t->perf_event_list, owner_entry) { current 5320 kernel/events/core.c mutex_unlock(¤t->perf_event_mutex); current 6073 kernel/events/core.c regs_user->abi = perf_reg_abi(current); current 6075 kernel/events/core.c } else if (!(current->flags & PF_KTHREAD)) { current 6087 kernel/events/core.c regs_intr->abi = perf_reg_abi(current); current 6201 kernel/events/core.c data->tid_entry.pid = perf_event_pid(event, current); current 6202 kernel/events/core.c data->tid_entry.tid = perf_event_tid(event, current); current 6545 kernel/events/core.c if (current->mm != NULL) { current 6567 kernel/events/core.c bool crosstask = event->ctx->task && event->ctx->task != current; current 6868 kernel/events/core.c ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); current 6917 kernel/events/core.c ctx = current->perf_event_ctxp[ctxn]; current 7068 kernel/events/core.c task_event->event_id.ppid = perf_event_pid(event, current); current 7069 kernel/events/core.c task_event->event_id.ptid = perf_event_tid(event, current); current 7416 kernel/events/core.c mmap_event->event_id.pid = perf_event_pid(event, current); current 7417 kernel/events/core.c mmap_event->event_id.tid = perf_event_tid(event, current); current 7654 kernel/events/core.c ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); current 8122 kernel/events/core.c rec.pid = perf_event_pid(event, current); current 8123 kernel/events/core.c rec.tid = perf_event_tid(event, current); current 8750 kernel/events/core.c if (task && task != current) { current 9596 kernel/events/core.c if (!(event->attr.exclude_idle && is_idle_task(current))) current 10477 kernel/events/core.c event->ns = get_pid_ns(task_active_pid_ns(current)); current 11283 kernel/events/core.c event->owner = current; current 11297 kernel/events/core.c mutex_lock(¤t->perf_event_mutex); current 11298 kernel/events/core.c list_add_tail(&event->owner_entry, ¤t->perf_event_list); current 11299 kernel/events/core.c mutex_unlock(¤t->perf_event_mutex); current 11575 kernel/events/core.c WARN_ON_ONCE(child != current); current 11608 kernel/events/core.c put_task_struct(current); /* cannot be last */ current 11995 kernel/events/core.c struct task_struct *parent = current; current 507 kernel/events/hw_breakpoint.c if (irqs_disabled() && bp->ctx && bp->ctx->task == current) current 1397 kernel/events/uprobes.c if (!fatal_signal_pending(current) && current 1490 kernel/events/uprobes.c struct mm_struct *mm = current->mm; current 1538 kernel/events/uprobes.c struct mm_struct *mm = current->mm; current 1703 kernel/events/uprobes.c struct uprobe_task *utask = current->utask; current 1753 kernel/events/uprobes.c if (!current->utask) current 1754 kernel/events/uprobes.c current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); current 1755 kernel/events/uprobes.c return current->utask; current 1789 kernel/events/uprobes.c current->comm, current->pid, msg); current 1794 kernel/events/uprobes.c if (current->flags & PF_EXITING) current 1797 kernel/events/uprobes.c if (!__create_xol_area(current->utask->dup_xol_addr) && current 1798 kernel/events/uprobes.c !fatal_signal_pending(current)) current 1799 kernel/events/uprobes.c uprobe_warn(current, "dup xol area"); current 1807 kernel/events/uprobes.c struct uprobe_task *utask = current->utask; current 1808 kernel/events/uprobes.c struct mm_struct *mm = current->mm; current 1847 kernel/events/uprobes.c area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */ current 1884 kernel/events/uprobes.c current->pid, current->tgid); current 1912 kernel/events/uprobes.c uprobe_warn(current, "handle tail call"); current 1954 kernel/events/uprobes.c xol_free_insn_slot(current); current 1974 kernel/events/uprobes.c struct task_struct *t = current; current 2052 kernel/events/uprobes.c struct mm_struct *mm = current->mm; current 2106 kernel/events/uprobes.c unapply_uprobe(uprobe, current->mm); current 2143 kernel/events/uprobes.c utask = current->utask; current 2174 kernel/events/uprobes.c uprobe_warn(current, "handle uretprobe, sending SIGILL."); current 2208 kernel/events/uprobes.c send_sig(SIGTRAP, current, 0); current 2283 kernel/events/uprobes.c xol_free_insn_slot(current); current 2285 kernel/events/uprobes.c spin_lock_irq(¤t->sighand->siglock); current 2287 kernel/events/uprobes.c spin_unlock_irq(¤t->sighand->siglock); current 2290 kernel/events/uprobes.c uprobe_warn(current, "execute the probed insn, sending SIGILL."); current 2312 kernel/events/uprobes.c utask = current->utask; current 2325 kernel/events/uprobes.c if (!current->mm) current 2328 kernel/events/uprobes.c if (!test_bit(MMF_HAS_UPROBES, ¤t->mm->flags) && current 2329 kernel/events/uprobes.c (!current->utask || !current->utask->return_instances)) current 2342 kernel/events/uprobes.c struct uprobe_task *utask = current->utask; current 2344 kernel/events/uprobes.c if (!current->mm || !utask || !utask->active_uprobe) current 40 kernel/exec_domain.c unsigned int old = current->personality; current 294 kernel/exit.c retval = will_become_orphaned_pgrp(task_pgrp(current), NULL); current 349 kernel/exit.c struct task_struct *c, *g, *p = current; current 437 kernel/exit.c struct mm_struct *mm = current->mm; current 440 kernel/exit.c exit_mm_release(current, mm); current 458 kernel/exit.c self.task = current; current 477 kernel/exit.c BUG_ON(mm != current->active_mm); current 479 kernel/exit.c task_lock(current); current 480 kernel/exit.c current->mm = NULL; current 482 kernel/exit.c enter_lazy_tlb(mm, current); current 483 kernel/exit.c task_unlock(current); current 694 kernel/exit.c free = stack_not_used(current); current 702 kernel/exit.c current->comm, task_pid_nr(current), free); current 713 kernel/exit.c struct task_struct *tsk = current; current 754 kernel/exit.c current->comm, task_pid_nr(current), current 826 kernel/exit.c if (unlikely(current->pi_state_cache)) current 827 kernel/exit.c kfree(current->pi_state_cache); current 878 kernel/exit.c struct signal_struct *sig = current->signal; current 884 kernel/exit.c else if (!thread_group_empty(current)) { current 885 kernel/exit.c struct sighand_struct *const sighand = current->sighand; current 894 kernel/exit.c zap_other_threads(current); current 1012 kernel/exit.c struct signal_struct *psig = current->signal; current 1037 kernel/exit.c spin_lock_irq(¤t->sighand->siglock); current 1062 kernel/exit.c spin_unlock_irq(¤t->sighand->siglock); current 1427 kernel/exit.c wo->child_wait.private = current; current 1428 kernel/exit.c add_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); current 1443 kernel/exit.c tsk = current; current 1455 kernel/exit.c } while_each_thread(current, tsk); current 1462 kernel/exit.c if (!signal_pending(current)) { current 1469 kernel/exit.c remove_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); current 1523 kernel/exit.c pid = get_task_pid(current, PIDTYPE_PGID); current 1605 kernel/exit.c pid = get_task_pid(current, PIDTYPE_PGID); current 530 kernel/fork.c if (fatal_signal_pending(current)) { current 688 kernel/fork.c WARN_ON_ONCE(mm == current->mm); current 689 kernel/fork.c WARN_ON_ONCE(mm == current->active_mm); current 738 kernel/fork.c WARN_ON(tsk == current); current 1032 kernel/fork.c if (current->mm) { current 1033 kernel/fork.c mm->flags = current->mm->flags & MMF_INIT_MASK; current 1034 kernel/fork.c mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; current 1068 kernel/fork.c return mm_init(mm, current, current_user_ns()); current 1229 kernel/fork.c if (mm && mm != current->mm && current 1399 kernel/fork.c oldmm = current->mm; current 1413 kernel/fork.c mm = dup_mm(tsk, current->mm); current 1428 kernel/fork.c struct fs_struct *fs = current->fs; current 1454 kernel/fork.c oldf = current->files; current 1476 kernel/fork.c struct io_context *ioc = current->io_context; current 1504 kernel/fork.c refcount_inc(¤t->sighand->count); current 1513 kernel/fork.c spin_lock_irq(¤t->sighand->siglock); current 1514 kernel/fork.c memcpy(sig->action, current->sighand->action, sizeof(sig->action)); current 1515 kernel/fork.c spin_unlock_irq(¤t->sighand->siglock); current 1576 kernel/fork.c task_lock(current->group_leader); current 1577 kernel/fork.c memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); current 1578 kernel/fork.c task_unlock(current->group_leader); current 1585 kernel/fork.c sig->oom_score_adj = current->signal->oom_score_adj; current 1586 kernel/fork.c sig->oom_score_adj_min = current->signal->oom_score_adj_min; current 1602 kernel/fork.c assert_spin_locked(¤t->sighand->siglock); current 1605 kernel/fork.c get_seccomp_filter(current); current 1606 kernel/fork.c p->seccomp = current->seccomp; current 1613 kernel/fork.c if (task_no_new_privs(current)) current 1628 kernel/fork.c current->clear_child_tid = tidptr; current 1630 kernel/fork.c return task_pid_vnr(current); current 1804 kernel/fork.c current->signal->flags & SIGNAL_UNKILLABLE) current 1813 kernel/fork.c (task_active_pid_ns(current) != current 1814 kernel/fork.c current->nsproxy->pid_ns_for_children)) current 1837 kernel/fork.c spin_lock_irq(¤t->sighand->siglock); current 1839 kernel/fork.c hlist_add_head(&delayed.node, ¤t->signal->multiprocess); current 1841 kernel/fork.c spin_unlock_irq(¤t->sighand->siglock); current 1843 kernel/fork.c if (signal_pending(current)) current 1847 kernel/fork.c p = dup_task_struct(current, node); current 1878 kernel/fork.c current->flags &= ~PF_NPROC_EXCEEDED; current 1920 kernel/fork.c p->default_timer_slack_ns = current->timer_slack_ns; current 2084 kernel/fork.c p->group_leader = current->group_leader; current 2085 kernel/fork.c p->tgid = current->tgid; current 2088 kernel/fork.c p->exit_signal = current->group_leader->exit_signal; current 2103 kernel/fork.c cgroup_threadgroup_change_begin(current); current 2133 kernel/fork.c p->real_parent = current->real_parent; current 2134 kernel/fork.c p->parent_exec_id = current->parent_exec_id; current 2136 kernel/fork.c p->real_parent = current; current 2137 kernel/fork.c p->parent_exec_id = current->self_exec_id; current 2142 kernel/fork.c spin_lock(¤t->sighand->siglock); current 2159 kernel/fork.c if (fatal_signal_pending(current)) { current 2175 kernel/fork.c init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); current 2176 kernel/fork.c init_task_pid(p, PIDTYPE_SID, task_session(current)); current 2183 kernel/fork.c p->signal->tty = tty_kref_get(current->signal->tty); current 2198 kernel/fork.c current->signal->nr_threads++; current 2199 kernel/fork.c atomic_inc(¤t->signal->live); current 2200 kernel/fork.c refcount_inc(¤t->signal->sigcnt); current 2212 kernel/fork.c spin_unlock(¤t->sighand->siglock); current 2218 kernel/fork.c cgroup_threadgroup_change_end(current); current 2227 kernel/fork.c spin_unlock(¤t->sighand->siglock); current 2231 kernel/fork.c cgroup_threadgroup_change_end(current); current 2284 kernel/fork.c spin_lock_irq(¤t->sighand->siglock); current 2286 kernel/fork.c spin_unlock_irq(¤t->sighand->siglock); current 2352 kernel/fork.c if (likely(!ptrace_event_enabled(current, trace))) current 2366 kernel/fork.c trace_sched_process_fork(current, p); current 2747 kernel/fork.c if (!thread_group_empty(current)) current 2751 kernel/fork.c if (refcount_read(¤t->sighand->count) > 1) current 2767 kernel/fork.c struct fs_struct *fs = current->fs; current 2788 kernel/fork.c struct files_struct *fd = current->files; current 2869 kernel/fork.c exit_sem(current); current 2873 kernel/fork.c exit_shm(current); current 2874 kernel/fork.c shm_init_task(current); current 2878 kernel/fork.c switch_task_namespaces(current, new_nsproxy); current 2880 kernel/fork.c task_lock(current); current 2883 kernel/fork.c fs = current->fs; current 2885 kernel/fork.c current->fs = new_fs; current 2894 kernel/fork.c fd = current->files; current 2895 kernel/fork.c current->files = new_fd; current 2899 kernel/fork.c task_unlock(current); current 2908 kernel/fork.c perf_event_namespaces(current); current 2938 kernel/fork.c struct task_struct *task = current; current 61 kernel/freezer.c long save = current->state; current 63 kernel/freezer.c pr_debug("%s entered refrigerator\n", current->comm); current 69 kernel/freezer.c current->flags |= PF_FROZEN; current 70 kernel/freezer.c if (!freezing(current) || current 72 kernel/freezer.c current->flags &= ~PF_FROZEN; current 75 kernel/freezer.c if (!(current->flags & PF_FROZEN)) current 81 kernel/freezer.c pr_debug("%s left refrigerator\n", current->comm); current 171 kernel/freezer.c current->flags &= ~PF_NOFREEZE; current 575 kernel/futex.c struct mm_struct *mm = current->mm; current 770 kernel/futex.c struct mm_struct *mm = current->mm; current 774 kernel/futex.c ret = fixup_user_fault(current, mm, (unsigned long)uaddr, current 831 kernel/futex.c if (likely(current->pi_state_cache)) current 845 kernel/futex.c current->pi_state_cache = pi_state; current 852 kernel/futex.c struct futex_pi_state *pi_state = current->pi_state_cache; current 855 kernel/futex.c current->pi_state_cache = NULL; current 895 kernel/futex.c if (current->pi_state_cache) { current 905 kernel/futex.c current->pi_state_cache = pi_state; current 1730 kernel/futex.c char comm[sizeof(current->comm)]; current 1736 kernel/futex.c get_task_comm(comm, current), oparg); current 2358 kernel/futex.c prio = min(current->normal_prio, MAX_RT_PRIO); current 2362 kernel/futex.c q->task = current; current 2496 kernel/futex.c if (oldowner != current) { current 2517 kernel/futex.c WARN_ON_ONCE(argowner != current); current 2518 kernel/futex.c if (oldowner == current) { current 2655 kernel/futex.c if (q->pi_state->owner != current) current 2656 kernel/futex.c ret = fixup_pi_state_owner(uaddr, q, current); current 2668 kernel/futex.c if (q->pi_state->owner == current) { current 2677 kernel/futex.c if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) { current 2816 kernel/futex.c current->timer_slack_ns); current 2842 kernel/futex.c if (!signal_pending(current)) current 2849 kernel/futex.c restart = ¤t->restart_block; current 2920 kernel/futex.c ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, current 2993 kernel/futex.c ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); current 3038 kernel/futex.c if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) { current 3086 kernel/futex.c u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current); current 3128 kernel/futex.c if (pi_state->owner != current) current 3265 kernel/futex.c else if (signal_pending(current)) current 3333 kernel/futex.c current->timer_slack_ns); current 3391 kernel/futex.c if (q.pi_state && (q.pi_state->owner != current)) { current 3393 kernel/futex.c ret = fixup_pi_state_owner(uaddr2, &q, current); current 3394 kernel/futex.c if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { current 3439 kernel/futex.c if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { current 3508 kernel/futex.c current->robust_list = head; current 3534 kernel/futex.c p = current; current 4075 kernel/futex.c current->compat_robust_list = head; current 4095 kernel/futex.c p = current; current 275 kernel/hung_task.c set_user_nice(current, 0); current 972 kernel/irq/manage.c set_cpus_allowed_ptr(current, mask); current 1027 kernel/irq/manage.c struct task_struct *tsk = current; current 1031 kernel/irq/manage.c if (WARN_ON_ONCE(!(current->flags & PF_EXITING))) current 1082 kernel/irq/manage.c task_work_add(current, &on_exit_work, false); current 1104 kernel/irq/manage.c task_work_cancel(current, irq_thread_dtor); current 656 kernel/kallsyms.c if (has_capability_noaudit(current, CAP_SYSLOG)) current 101 kernel/kcov.c t = current; current 122 kernel/kcov.c t = current; current 378 kernel/kcov.c t = current; current 406 kernel/kcov.c if (unused != 0 || current->kcov != kcov) current 408 kernel/kcov.c t = current; current 303 kernel/kexec_core.c if (fatal_signal_pending(current)) current 1074 kernel/kexec_core.c prstatus.pr_pid = current->pid; current 1876 kernel/kprobes.c hash = hash_ptr(current, KPROBE_HASH_BITS); current 1885 kernel/kprobes.c ri->task = current; current 70 kernel/kthread.c current->set_child_tid = (__force void __user *)kthread; current 103 kernel/kthread.c return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags); current 126 kernel/kthread.c return __kthread_should_park(current); current 145 kernel/kthread.c if (unlikely(freezing(current))) current 210 kernel/kthread.c __kthread_parkme(to_kthread(current)); current 243 kernel/kthread.c current->vfork_done = &self->exited; current 247 kernel/kthread.c create->result = current; current 275 kernel/kthread.c current->pref_node_fork = create->node; current 514 kernel/kthread.c if (k != current) { current 570 kernel/kthread.c struct task_struct *tsk = current; current 578 kernel/kthread.c current->flags |= PF_NOFREEZE; current 642 kernel/kthread.c WARN_ON(worker->task && worker->task != current); current 643 kernel/kthread.c worker->task = current; current 672 kernel/kthread.c } else if (!freezing(current)) current 1208 kernel/kthread.c if (!(current->flags & PF_KTHREAD)) current 1210 kernel/kthread.c kthread = to_kthread(current); current 1234 kernel/kthread.c if (current->flags & PF_KTHREAD) { current 1235 kernel/kthread.c kthread = to_kthread(current); current 91 kernel/livepatch/patch.c patch_state = current->patch_state; current 309 kernel/livepatch/transition.c if (task_running(rq, task) && task != current) { current 614 kernel/livepatch/transition.c child->patch_state = current->patch_state; current 104 kernel/locking/lockdep.c current->lockdep_recursion++; current 118 kernel/locking/lockdep.c current->lockdep_recursion--; current 382 kernel/locking/lockdep.c current->lockdep_recursion++; current 388 kernel/locking/lockdep.c current->lockdep_recursion--; current 708 kernel/locking/lockdep.c if (p->state == TASK_RUNNING && p != current) current 1620 kernel/locking/lockdep.c struct task_struct *curr = current; current 1653 kernel/locking/lockdep.c struct task_struct *curr = current; current 1722 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 1726 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 1751 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 1755 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 2307 kernel/locking/lockdep.c if (current->hardirq_context) current 2310 kernel/locking/lockdep.c if (current->softirq_context) current 2716 kernel/locking/lockdep.c pr_warn("%s/%d: ", current->comm, task_pid_nr(current)); current 3375 kernel/locking/lockdep.c struct task_struct *curr = current; current 3402 kernel/locking/lockdep.c if (unlikely(!debug_locks || current->lockdep_recursion)) current 3405 kernel/locking/lockdep.c if (unlikely(current->hardirqs_enabled)) { current 3433 kernel/locking/lockdep.c if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) current 3436 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 3438 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 3447 kernel/locking/lockdep.c struct task_struct *curr = current; current 3449 kernel/locking/lockdep.c if (unlikely(!debug_locks || current->lockdep_recursion)) current 3477 kernel/locking/lockdep.c struct task_struct *curr = current; current 3479 kernel/locking/lockdep.c if (unlikely(!debug_locks || current->lockdep_recursion)) current 3494 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 3509 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 3517 kernel/locking/lockdep.c struct task_struct *curr = current; current 3519 kernel/locking/lockdep.c if (unlikely(!debug_locks || current->lockdep_recursion)) current 3758 kernel/locking/lockdep.c if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) current 3762 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 3764 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 3820 kernel/locking/lockdep.c struct task_struct *curr = current; current 3974 kernel/locking/lockdep.c lockdep_print_held_locks(current); current 4123 kernel/locking/lockdep.c struct task_struct *curr = current; current 4167 kernel/locking/lockdep.c struct task_struct *curr = current; current 4223 kernel/locking/lockdep.c struct task_struct *curr = current; current 4305 kernel/locking/lockdep.c struct task_struct *curr = current; current 4325 kernel/locking/lockdep.c struct task_struct *curr = current; current 4352 kernel/locking/lockdep.c struct task_struct *curr = current; current 4372 kernel/locking/lockdep.c struct task_struct *curr = current; current 4407 kernel/locking/lockdep.c if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { current 4411 kernel/locking/lockdep.c if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { current 4424 kernel/locking/lockdep.c DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); current 4427 kernel/locking/lockdep.c DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); current 4432 kernel/locking/lockdep.c print_irqtrace_events(current); current 4442 kernel/locking/lockdep.c if (unlikely(current->lockdep_recursion)) current 4446 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4449 kernel/locking/lockdep.c check_chain_key(current); current 4450 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 4459 kernel/locking/lockdep.c if (unlikely(current->lockdep_recursion)) current 4463 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4466 kernel/locking/lockdep.c check_chain_key(current); current 4467 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 4482 kernel/locking/lockdep.c if (unlikely(current->lockdep_recursion)) current 4488 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4492 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 4502 kernel/locking/lockdep.c if (unlikely(current->lockdep_recursion)) current 4507 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4510 kernel/locking/lockdep.c check_chain_key(current); current 4511 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 4521 kernel/locking/lockdep.c if (unlikely(current->lockdep_recursion)) current 4527 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4529 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 4542 kernel/locking/lockdep.c if (unlikely(current->lockdep_recursion)) current 4548 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4550 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 4561 kernel/locking/lockdep.c if (unlikely(current->lockdep_recursion)) current 4567 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4569 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 4578 kernel/locking/lockdep.c if (unlikely(current->lockdep_recursion)) current 4584 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4586 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 4622 kernel/locking/lockdep.c struct task_struct *curr = current; current 4663 kernel/locking/lockdep.c struct task_struct *curr = current; current 4717 kernel/locking/lockdep.c if (unlikely(current->lockdep_recursion)) current 4722 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4725 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 4737 kernel/locking/lockdep.c if (unlikely(current->lockdep_recursion)) current 4742 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4744 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 4761 kernel/locking/lockdep.c lockdep_init_task(current); current 4762 kernel/locking/lockdep.c memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); current 4907 kernel/locking/lockdep.c return current == lockdep_selftest_task_struct; current 4969 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 4981 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 5028 kernel/locking/lockdep.c current->lockdep_recursion = 1; current 5032 kernel/locking/lockdep.c current->lockdep_recursion = 0; current 5285 kernel/locking/lockdep.c struct task_struct *curr = current; current 5318 kernel/locking/lockdep.c current->comm, task_pid_nr(current)); current 5321 kernel/locking/lockdep.c lockdep_print_held_locks(current); current 5328 kernel/locking/lockdep.c if (unlikely(current->lockdep_depth > 0)) current 5376 kernel/locking/lockdep.c struct task_struct *curr = current; current 5400 kernel/locking/lockdep.c struct task_struct *curr = current; current 444 kernel/locking/locktorture.c if (!rt_task(current)) { current 472 kernel/locking/locktorture.c sched_setscheduler_nocheck(current, policy, ¶m); current 625 kernel/locking/locktorture.c set_user_nice(current, MAX_NICE); current 662 kernel/locking/locktorture.c set_user_nice(current, MAX_NICE); current 92 kernel/locking/mutex.c if (unlikely(__mutex_owner(lock) == current)) current 109 kernel/locking/mutex.c unsigned long owner, curr = (unsigned long)current; current 168 kernel/locking/mutex.c unsigned long curr = (unsigned long)current; current 179 kernel/locking/mutex.c unsigned long curr = (unsigned long)current; current 211 kernel/locking/mutex.c debug_mutex_add_waiter(lock, waiter, current); current 232 kernel/locking/mutex.c DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current); current 422 kernel/locking/mutex.c if (owner != current) current 1003 kernel/locking/mutex.c waiter.task = current; current 1021 kernel/locking/mutex.c if (signal_pending_state(state, current)) { current 1071 kernel/locking/mutex.c mutex_remove_waiter(lock, &waiter, current); current 1090 kernel/locking/mutex.c mutex_remove_waiter(lock, &waiter, current); current 1242 kernel/locking/mutex.c DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current); current 81 kernel/locking/rtmutex-debug.c if (task && task != current) { current 113 kernel/locking/rtmutex-debug.c current->comm, task_pid_nr(current)); current 116 kernel/locking/rtmutex-debug.c current->comm, task_pid_nr(current)); current 123 kernel/locking/rtmutex-debug.c debug_show_held_locks(current); current 130 kernel/locking/rtmutex-debug.c current->comm, task_pid_nr(current)); current 145 kernel/locking/rtmutex-debug.c DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); current 1022 kernel/locking/rtmutex.c raw_spin_lock(¤t->pi_lock); current 1033 kernel/locking/rtmutex.c rt_mutex_dequeue_pi(current, waiter); current 1034 kernel/locking/rtmutex.c rt_mutex_adjust_prio(current); current 1058 kernel/locking/rtmutex.c raw_spin_unlock(¤t->pi_lock); current 1076 kernel/locking/rtmutex.c raw_spin_lock(¤t->pi_lock); current 1078 kernel/locking/rtmutex.c current->pi_blocked_on = NULL; current 1079 kernel/locking/rtmutex.c raw_spin_unlock(¤t->pi_lock); current 1115 kernel/locking/rtmutex.c next_lock, NULL, current); current 1175 kernel/locking/rtmutex.c if (try_to_take_rt_mutex(lock, current, waiter)) current 1184 kernel/locking/rtmutex.c if (signal_pending(current)) current 1251 kernel/locking/rtmutex.c if (try_to_take_rt_mutex(lock, current, NULL)) { current 1262 kernel/locking/rtmutex.c ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); current 1293 kernel/locking/rtmutex.c int ret = try_to_take_rt_mutex(lock, current, NULL); current 1411 kernel/locking/rtmutex.c if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) current 1426 kernel/locking/rtmutex.c likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) current 1436 kernel/locking/rtmutex.c if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) current 1460 kernel/locking/rtmutex.c if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) current 1905 kernel/locking/rtmutex.c try_to_take_rt_mutex(lock, current, waiter); current 1910 kernel/locking/rtmutex.c if (rt_mutex_owner(lock) != current) { current 111 kernel/locking/rwsem.c atomic_long_read(&(sem)->owner), (long)current, \ current 177 kernel/locking/rwsem.c atomic_long_set(&sem->owner, (long)current); current 214 kernel/locking/rwsem.c __rwsem_set_reader_owned(sem, current); current 245 kernel/locking/rwsem.c while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) { current 891 kernel/locking/rwsem.c if (rt_task(current) && current 1039 kernel/locking/rwsem.c waiter.task = current; current 1094 kernel/locking/rwsem.c if (signal_pending_state(state, current)) { current 1166 kernel/locking/rwsem.c waiter.task = current; current 1235 kernel/locking/rwsem.c if (signal_pending_state(state, current)) current 1260 kernel/locking/rwsem.c if ((wstate == WRITER_FIRST) && (rt_task(current) || current 1458 kernel/locking/rwsem.c DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) && current 1481 kernel/locking/rwsem.c DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem); current 210 kernel/locking/semaphore.c waiter.task = current; current 214 kernel/locking/semaphore.c if (signal_pending_state(state, current)) current 60 kernel/locking/spinlock_debug.c current->comm, task_pid_nr(current)); current 84 kernel/locking/spinlock_debug.c SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion"); current 92 kernel/locking/spinlock_debug.c WRITE_ONCE(lock->owner, current); current 99 kernel/locking/spinlock_debug.c SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); current 148 kernel/locking/spinlock_debug.c msg, raw_smp_processor_id(), current->comm, current 149 kernel/locking/spinlock_debug.c task_pid_nr(current), lock); current 183 kernel/locking/spinlock_debug.c RWLOCK_BUG_ON(lock->owner == current, lock, "recursion"); current 191 kernel/locking/spinlock_debug.c WRITE_ONCE(lock->owner, current); current 197 kernel/locking/spinlock_debug.c RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner"); current 3577 kernel/module.c current->flags &= ~PF_USED_ASYNC; current 3616 kernel/module.c if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC)) current 202 kernel/nsproxy.c *new_nsp = create_new_namespaces(unshare_flags, current, user_ns, current 203 kernel/nsproxy.c new_fs ? new_fs : current->fs); current 235 kernel/nsproxy.c struct task_struct *tsk = current; current 565 kernel/panic.c raw_smp_processor_id(), current->pid, file, line, current 569 kernel/panic.c raw_smp_processor_id(), current->pid, caller); current 592 kernel/panic.c print_irqtrace_events(current); current 679 kernel/panic.c current->comm, task_pid_nr(current), current 269 kernel/pid.c return find_pid_ns(nr, task_active_pid_ns(current)); current 355 kernel/pid.c return find_task_by_pid_ns(vnr, task_active_pid_ns(current)); current 421 kernel/pid.c return pid_nr_ns(pid, task_active_pid_ns(current)); current 432 kernel/pid.c ns = task_active_pid_ns(current); current 155 kernel/pid_namespace.c if (task_active_pid_ns(current) != old_ns) current 185 kernel/pid_namespace.c struct task_struct *task, *me = current; current 260 kernel/pid_namespace.c current->signal->group_exit_code = pid_ns->reboot; current 270 kernel/pid_namespace.c struct pid_namespace *pid_ns = task_active_pid_ns(current); current 385 kernel/pid_namespace.c struct pid_namespace *active = task_active_pid_ns(current); current 416 kernel/pid_namespace.c struct pid_namespace *active = task_active_pid_ns(current); current 26 kernel/power/main.c current->flags |= PF_FREEZER_SKIP; current 49 kernel/power/main.c current->flags &= ~PF_FREEZER_SKIP; current 53 kernel/power/process.c if (p == current || !freeze_task(p)) current 102 kernel/power/process.c if (p != current && !freezer_should_skip(p) current 132 kernel/power/process.c current->flags |= PF_SUSPEND_TASK; current 192 kernel/power/process.c struct task_struct *curr = current; current 711 kernel/printk/printk.c current->comm, task_pid_nr(current)); current 836 kernel/printk/printk.c if (!___ratelimit(&user->rs, current->comm)) current 1678 kernel/printk/printk.c console_owner = current; current 1752 kernel/printk/printk.c if (!waiter && owner && owner != current) { current 1839 kernel/printk/printk.c return in_task() ? task_pid_nr(current) : current 53 kernel/ptrace.c (current != tsk->parent) || current 196 kernel/ptrace.c WARN_ON(!task->ptrace || task->parent != current); current 241 kernel/ptrace.c if (child->ptrace && child->parent == current) { current 303 kernel/ptrace.c if (same_thread_group(task, current)) current 386 kernel/ptrace.c if (same_thread_group(task, current)) current 415 kernel/ptrace.c ptrace_link(task, current); current 479 kernel/ptrace.c if (!current->ptrace) { current 480 kernel/ptrace.c ret = security_ptrace_traceme(current->parent); current 486 kernel/ptrace.c if (!ret && !(current->real_parent->flags & PF_EXITING)) { current 487 kernel/ptrace.c current->ptrace = PT_PTRACED; current 488 kernel/ptrace.c ptrace_link(current, current->real_parent); current 568 kernel/ptrace.c __ptrace_detach(current, child); current 660 kernel/ptrace.c if (seccomp_mode(¤t->seccomp) != SECCOMP_MODE_DISABLED || current 661 kernel/ptrace.c current->ptrace & PT_SUSPEND_SECCOMP) current 776 kernel/ptrace.c if (signal_pending(current)) current 851 kernel/ptrace.c need_siglock = data && !thread_group_empty(current); current 1251 kernel/ptrace.c arch_ptrace_attach(current); current 329 kernel/rcu/rcuperf.c set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)); current 330 kernel/rcu/rcuperf.c set_user_nice(current, MAX_NICE); current 371 kernel/rcu/rcuperf.c set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)); current 373 kernel/rcu/rcuperf.c sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); current 435 kernel/rcu/rcuperf.c sched_setscheduler_nocheck(current, current 695 kernel/rcu/rcutorture.c rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu)); current 809 kernel/rcu/rcutorture.c if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { current 1008 kernel/rcu/rcutorture.c current == writer_task); current 1103 kernel/rcu/rcutorture.c set_user_nice(current, MAX_NICE); current 1363 kernel/rcu/rcutorture.c set_user_nice(current, MAX_NICE); current 1958 kernel/rcu/rcutorture.c set_user_nice(current, MAX_NICE); current 2018 kernel/rcu/rcutorture.c set_user_nice(current, MAX_NICE); current 71 kernel/rcu/tiny.c set_tsk_need_resched(current); current 146 kernel/rcu/tiny.c if (unlikely(is_idle_task(current))) { current 222 kernel/rcu/tree.c rcu_preempt_deferred_qs(current); current 376 kernel/rcu/tree.c rcu_preempt_deferred_qs(current); current 580 kernel/rcu/tree.c WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); current 584 kernel/rcu/tree.c rcu_preempt_deferred_qs(current); current 748 kernel/rcu/tree.c WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current)); current 1235 kernel/rcu/tree.c if ((current == rcu_state.gp_kthread && current 1667 kernel/rcu/tree.c WARN_ON(signal_pending(current)); current 1793 kernel/rcu/tree.c WARN_ON(signal_pending(current)); current 2129 kernel/rcu/tree.c need_resched(), is_idle_task(current), current 2166 kernel/rcu/tree.c (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) current 2189 kernel/rcu/tree.c is_idle_task(current), rcu_is_callbacks_kthread()); current 2239 kernel/rcu/tree.c set_tsk_need_resched(current); current 2355 kernel/rcu/tree.c rcu_preempt_deferred_qs(current); current 2356 kernel/rcu/tree.c } else if (rcu_preempt_need_deferred_qs(current)) { current 2357 kernel/rcu/tree.c set_tsk_need_resched(current); current 2396 kernel/rcu/tree.c if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) current 2408 kernel/rcu/tree.c if (t != NULL && t != current) current 3180 kernel/rcu/tree.c rcu_preempt_deferred_qs(current); current 604 kernel/rcu/tree_exp.h struct task_struct *t = current; current 705 kernel/rcu/tree_exp.h set_tsk_need_resched(current); current 137 kernel/rcu/tree_plugin.h struct task_struct *t = current; current 268 kernel/rcu/tree_plugin.h WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false); current 287 kernel/rcu/tree_plugin.h struct task_struct *t = current; current 358 kernel/rcu/tree_plugin.h current->rcu_read_lock_nesting++; current 360 kernel/rcu/tree_plugin.h WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX); current 374 kernel/rcu/tree_plugin.h struct task_struct *t = current; current 627 kernel/rcu/tree_plugin.h set_tsk_need_resched(current); current 684 kernel/rcu/tree_plugin.h struct task_struct *t = current; current 687 kernel/rcu/tree_plugin.h rcu_note_voluntary_context_switch(current); current 723 kernel/rcu/tree_plugin.h struct task_struct *t = current; current 725 kernel/rcu/tree_plugin.h if (unlikely(!list_empty(¤t->rcu_node_entry))) { current 735 kernel/rcu/tree_plugin.h rcu_preempt_deferred_qs(current); current 855 kernel/rcu/tree_plugin.h rcu_tasks_qs(current); current 950 kernel/rcu/tree_plugin.h sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); current 1096 kernel/rcu/tree_plugin.h return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current; current 2064 kernel/rcu/tree_plugin.h WARN_ON(signal_pending(current)); current 2130 kernel/rcu/tree_plugin.h WARN_ON(signal_pending(current)); current 2383 kernel/rcu/tree_plugin.h WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask)); current 2570 kernel/rcu/tree_plugin.h housekeeping_affine(current, HK_FLAG_RCU); current 2577 kernel/rcu/tree_plugin.h WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id()); current 2585 kernel/rcu/tree_plugin.h WRITE_ONCE(current->rcu_tasks_idle_cpu, -1); current 410 kernel/rcu/tree_stall.h sched_show_task(current); current 473 kernel/rcu/tree_stall.h set_tsk_need_resched(current); current 252 kernel/rcu/update.c current->lockdep_recursion == 0; current 641 kernel/rcu/update.c housekeeping_affine(current, HK_FLAG_RCU); current 663 kernel/rcu/update.c WARN_ON(signal_pending(current)); current 694 kernel/rcu/update.c if (t != current && READ_ONCE(t->on_rq) && current 745 kernel/rcu/update.c WARN_ON(signal_pending(current)); current 807 kernel/rcu/update.c current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu); current 815 kernel/rcu/update.c __srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx); current 230 kernel/reboot.c current->flags |= PF_NO_SETAFFINITY; current 233 kernel/reboot.c set_cpus_allowed_ptr(current, cpumask_of(cpu)); current 313 kernel/reboot.c struct pid_namespace *pid_ns = task_active_pid_ns(current); current 1127 kernel/resource.c DECLARE_WAITQUEUE(wait, current); current 152 kernel/rseq.c if (current->rseq_sig != sig) { current 155 kernel/rseq.c sig, current->rseq_sig, current->pid, usig); current 224 kernel/rseq.c struct task_struct *t = current; current 264 kernel/rseq.c struct task_struct *t = current; current 292 kernel/rseq.c struct task_struct *t = current; current 314 kernel/rseq.c if (current->rseq != rseq || !current->rseq) current 318 kernel/rseq.c if (current->rseq_sig != sig) current 320 kernel/rseq.c ret = rseq_reset_rseq_cpu_id(current); current 323 kernel/rseq.c current->rseq = NULL; current 324 kernel/rseq.c current->rseq_sig = 0; current 331 kernel/rseq.c if (current->rseq) { current 337 kernel/rseq.c if (current->rseq != rseq || rseq_len != sizeof(*rseq)) current 339 kernel/rseq.c if (current->rseq_sig != sig) current 354 kernel/rseq.c current->rseq = rseq; current 355 kernel/rseq.c current->rseq_sig = sig; current 361 kernel/rseq.c rseq_set_notify_resume(current); current 190 kernel/sched/autogroup.c sig->autogroup = autogroup_task_get(current); current 218 kernel/sched/autogroup.c err = security_task_setnice(current, nice); current 222 kernel/sched/autogroup.c if (nice < 0 && !can_nice(current, nice)) current 73 kernel/sched/completion.c DECLARE_WAITQUEUE(wait, current); current 77 kernel/sched/completion.c if (signal_pending_state(state, current)) { current 2518 kernel/sched/core.c if (p == current) { current 2850 kernel/sched/core.c p->prio = current->normal_prio; current 3004 kernel/sched/core.c hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); current 3199 kernel/sched/core.c current->comm, current->pid, preempt_count())) current 3217 kernel/sched/core.c perf_event_task_sched_in(prev, current); current 3221 kernel/sched/core.c kcov_finish_switch(current); current 3223 kernel/sched/core.c fire_sched_in_preempt_notifiers(current); current 3319 kernel/sched/core.c if (current->set_child_tid) current 3320 kernel/sched/core.c put_user(task_pid_vnr(current), current->set_child_tid); current 3492 kernel/sched/core.c struct task_struct *p = current; current 3765 kernel/sched/core.c current->preempt_disable_ip = ip; current 3845 kernel/sched/core.c unsigned long preempt_disable_ip = get_preempt_disable_ip(current); current 4092 kernel/sched/core.c current->flags |= PF_NOFREEZE; current 4139 kernel/sched/core.c struct task_struct *tsk = current; current 4170 kernel/sched/core.c WARN_ON_ONCE(current->state); current 4581 kernel/sched/core.c nice = task_nice(current) + increment; current 4584 kernel/sched/core.c if (increment < 0 && !can_nice(current, nice)) current 4587 kernel/sched/core.c retval = security_task_setnice(current, nice); current 4591 kernel/sched/core.c set_user_nice(current, nice); current 4670 kernel/sched/core.c return pid ? find_task_by_vpid(pid) : current; current 5588 kernel/sched/core.c current->sched_class->yield_task(rq); current 5694 kernel/sched/core.c struct task_struct *curr = current; current 5753 kernel/sched/core.c int old_iowait = current->in_iowait; current 5755 kernel/sched/core.c current->in_iowait = 1; current 5756 kernel/sched/core.c blk_schedule_flush_plug(current); current 5763 kernel/sched/core.c current->in_iowait = token; current 6177 kernel/sched/core.c struct mm_struct *mm = current->active_mm; current 6182 kernel/sched/core.c switch_mm(mm, &init_mm, current); current 6183 kernel/sched/core.c current->active_mm = &init_mm; current 6510 kernel/sched/core.c if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0) current 6694 kernel/sched/core.c enter_lazy_tlb(&init_mm, current); current 6702 kernel/sched/core.c init_idle(current, smp_processor_id()); current 6735 kernel/sched/core.c WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, current 6738 kernel/sched/core.c current->state, current 6739 kernel/sched/core.c (void *)current->task_state_change, current 6740 kernel/sched/core.c (void *)current->task_state_change); current 6757 kernel/sched/core.c !is_idle_task(current) && !current->non_block_count) || current 6767 kernel/sched/core.c preempt_disable_ip = get_preempt_disable_ip(current); current 6774 kernel/sched/core.c in_atomic(), irqs_disabled(), current->non_block_count, current 6775 kernel/sched/core.c current->pid, current->comm); current 6777 kernel/sched/core.c if (task_stack_end_corrupted(current)) current 6780 kernel/sched/core.c debug_show_held_locks(current); current 6782 kernel/sched/core.c print_irqtrace_events(current); current 6814 kernel/sched/core.c current->pid, current->comm); current 6816 kernel/sched/core.c debug_show_held_locks(current); current 309 kernel/sched/cputime.c if (same_thread_group(current, tsk)) current 310 kernel/sched/cputime.c (void) task_sched_runtime(current); current 397 kernel/sched/cputime.c irqtime_account_process_tick(current, 0, rq, ticks); current 815 kernel/sched/cputime.c vtime = ¤t->vtime; current 1094 kernel/sched/fair.c return rcu_dereference_check(p->numa_group, p == current || current 1100 kernel/sched/fair.c return rcu_dereference_protected(p->numa_group, p == current); current 1415 kernel/sched/fair.c this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); current 2307 kernel/sched/fair.c if (tsk->mm == current->mm) current 2396 kernel/sched/fair.c struct task_struct *p = current; current 2398 kernel/sched/fair.c int cpu_node = task_node(current); current 2487 kernel/sched/fair.c struct task_struct *p = current; current 2663 kernel/sched/fair.c delay = min_t(unsigned int, task_scan_max(current), current 2664 kernel/sched/fair.c current->numa_scan_period * mm_users * NSEC_PER_MSEC); current 5426 kernel/sched/fair.c if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { current 5427 kernel/sched/fair.c current->wakee_flips >>= 1; current 5428 kernel/sched/fair.c current->wakee_flip_decay_ts = jiffies; current 5431 kernel/sched/fair.c if (current->last_wakee != p) { current 5432 kernel/sched/fair.c current->last_wakee = p; current 5433 kernel/sched/fair.c current->wakee_flips++; current 5456 kernel/sched/fair.c unsigned int master = current->wakee_flips; current 5513 kernel/sched/fair.c unsigned long current_load = task_h_load(current); current 6190 kernel/sched/fair.c if (unlikely(task_on_rq_queued(p) || current == p)) current 6475 kernel/sched/fair.c int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); current 6524 kernel/sched/fair.c current->recent_used_cpu = cpu; current 10011 kernel/sched/fair.c cfs_rq = task_cfs_rq(current); current 289 kernel/sched/idle.c if (unlikely(klp_patch_pending(current))) current 290 kernel/sched/idle.c klp_update_patch_state(current); current 309 kernel/sched/idle.c set_tsk_need_resched(current); current 322 kernel/sched/idle.c WARN_ON_ONCE(current->policy != SCHED_FIFO); current 323 kernel/sched/idle.c WARN_ON_ONCE(current->nr_cpus_allowed != 1); current 324 kernel/sched/idle.c WARN_ON_ONCE(!(current->flags & PF_KTHREAD)); current 325 kernel/sched/idle.c WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY)); current 330 kernel/sched/idle.c current->flags |= PF_IDLE; current 343 kernel/sched/idle.c current->flags &= ~PF_IDLE; current 37 kernel/sched/membarrier.c if (current->mm != mm) current 136 kernel/sched/membarrier.c struct mm_struct *mm = current->mm; current 261 kernel/sched/membarrier.c struct task_struct *p = current; current 280 kernel/sched/membarrier.c struct task_struct *p = current; current 821 kernel/sched/psi.c *flags = current->flags & PF_MEMSTALL; current 831 kernel/sched/psi.c current->flags |= PF_MEMSTALL; current 832 kernel/sched/psi.c psi_task_change(current, 0, TSK_MEMSTALL); current 860 kernel/sched/psi.c current->flags &= ~PF_MEMSTALL; current 861 kernel/sched/psi.c psi_task_change(current, TSK_MEMSTALL, 0); current 74 kernel/sched/swait.c wait->task = current; current 96 kernel/sched/swait.c if (signal_pending_state(state, current)) { current 252 kernel/sched/wait.c wq_entry->private = current; current 264 kernel/sched/wait.c if (signal_pending_state(state, current)) { current 307 kernel/sched/wait.c if (signal_pending(current)) current 324 kernel/sched/wait.c if (signal_pending(current)) current 383 kernel/sched/wait.c return (current->flags & PF_KTHREAD) && kthread_should_stop(); current 182 kernel/sched/wait_bit.c .private = current, current 199 kernel/sched/wait_bit.c if (signal_pending_state(mode, current)) current 209 kernel/sched/wait_bit.c if (signal_pending_state(mode, current)) current 223 kernel/sched/wait_bit.c if (signal_pending_state(mode, current)) current 237 kernel/sched/wait_bit.c if (signal_pending_state(mode, current)) current 146 kernel/seccomp.c struct task_struct *task = current; current 260 kernel/seccomp.c READ_ONCE(current->seccomp.filter); current 286 kernel/seccomp.c assert_spin_locked(¤t->sighand->siglock); current 288 kernel/seccomp.c if (current->seccomp.mode && current->seccomp.mode != seccomp_mode) current 341 kernel/seccomp.c BUG_ON(!mutex_is_locked(¤t->signal->cred_guard_mutex)); current 342 kernel/seccomp.c assert_spin_locked(¤t->sighand->siglock); current 345 kernel/seccomp.c caller = current; current 382 kernel/seccomp.c BUG_ON(!mutex_is_locked(¤t->signal->cred_guard_mutex)); current 383 kernel/seccomp.c assert_spin_locked(¤t->sighand->siglock); current 386 kernel/seccomp.c caller = current; current 447 kernel/seccomp.c if (!task_no_new_privs(current) && current 516 kernel/seccomp.c assert_spin_locked(¤t->sighand->siglock); current 520 kernel/seccomp.c for (walker = current->seccomp.filter; walker; walker = walker->prev) current 542 kernel/seccomp.c filter->prev = current->seccomp.filter; current 543 kernel/seccomp.c current->seccomp.filter = filter; current 595 kernel/seccomp.c info->si_call_addr = (void __user *)KSTK_EIP(current); current 597 kernel/seccomp.c info->si_arch = syscall_get_arch(current); current 709 kernel/seccomp.c int mode = current->seccomp.mode; current 712 kernel/seccomp.c unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) current 748 kernel/seccomp.c n.task = current; current 783 kernel/seccomp.c syscall_set_return_value(current, task_pt_regs(current), current 815 kernel/seccomp.c syscall_set_return_value(current, task_pt_regs(current), current 821 kernel/seccomp.c syscall_rollback(current, task_pt_regs(current)); current 832 kernel/seccomp.c if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) { current 833 kernel/seccomp.c syscall_set_return_value(current, current 834 kernel/seccomp.c task_pt_regs(current), current 851 kernel/seccomp.c if (fatal_signal_pending(current)) current 854 kernel/seccomp.c this_syscall = syscall_get_nr(current, task_pt_regs(current)); current 891 kernel/seccomp.c get_nr_threads(current) == 1) { current 895 kernel/seccomp.c syscall_rollback(current, task_pt_regs(current)); current 922 kernel/seccomp.c int mode = current->seccomp.mode; current 926 kernel/seccomp.c unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) current 930 kernel/seccomp.c syscall_get_nr(current, task_pt_regs(current)); current 946 kernel/seccomp.c return current->seccomp.mode; current 961 kernel/seccomp.c spin_lock_irq(¤t->sighand->siglock); current 969 kernel/seccomp.c seccomp_assign_mode(current, seccomp_mode, 0); current 973 kernel/seccomp.c spin_unlock_irq(¤t->sighand->siglock); current 1216 kernel/seccomp.c for (cur = current->seccomp.filter; cur; cur = cur->prev) { current 1307 kernel/seccomp.c mutex_lock_killable(¤t->signal->cred_guard_mutex)) current 1310 kernel/seccomp.c spin_lock_irq(¤t->sighand->siglock); current 1321 kernel/seccomp.c seccomp_assign_mode(current, seccomp_mode, flags); current 1323 kernel/seccomp.c spin_unlock_irq(¤t->sighand->siglock); current 1325 kernel/seccomp.c mutex_unlock(¤t->signal->cred_guard_mutex); current 1508 kernel/seccomp.c current->seccomp.mode != SECCOMP_MODE_DISABLED) { current 1546 kernel/seccomp.c current->seccomp.mode != SECCOMP_MODE_DISABLED) { current 184 kernel/signal.c if (!recalc_sigpending_tsk(current) && !freezing(current) && current 185 kernel/signal.c !klp_patch_pending(current)) current 196 kernel/signal.c spin_lock_irq(¤t->sighand->siglock); current 197 kernel/signal.c set_tsk_thread_flag(current, TIF_SIGPENDING); current 199 kernel/signal.c spin_unlock_irq(¤t->sighand->siglock); current 265 kernel/signal.c current->comm, current->pid, sig); current 395 kernel/signal.c unsigned long jobctl = current->jobctl; current 397 kernel/signal.c struct signal_struct *sig = current->signal; current 513 kernel/signal.c struct task_struct *tsk = current; current 684 kernel/signal.c current->jobctl |= JOBCTL_STOP_DEQUEUED; current 708 kernel/signal.c struct task_struct *tsk = current; current 843 kernel/signal.c if (!same_thread_group(current, t) && current 852 kernel/signal.c if (!sid || sid == task_session(current)) current 1124 kernel/signal.c q->info.si_pid = task_tgid_nr_ns(current, current 1216 kernel/signal.c force = !task_pid_nr_ns(current, task_active_pid_ns(t)); current 1236 kernel/signal.c if (!task_pid_nr_ns(current, task_active_pid_ns(t))) { current 1341 kernel/signal.c return force_sig_info_to_task(info, current); current 1574 kernel/signal.c pid ? find_vpid(-pid) : task_pgrp(current)); current 1581 kernel/signal.c !same_thread_group(p, current)) { current 1645 kernel/signal.c struct task_struct *p = current; current 1685 kernel/signal.c ___ARCH_SI_IA64(imm, flags, isr), current); current 1813 kernel/signal.c struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); current 1824 kernel/signal.c spinlock_t *lock = ¤t->sighand->siglock; current 2081 kernel/signal.c if (!likely(current->ptrace)) current 2096 kernel/signal.c if (unlikely(current->mm->core_state) && current 2097 kernel/signal.c unlikely(current->mm == current->parent->mm)) current 2125 kernel/signal.c __releases(¤t->sighand->siglock) current 2126 kernel/signal.c __acquires(¤t->sighand->siglock) current 2142 kernel/signal.c spin_unlock_irq(¤t->sighand->siglock); current 2144 kernel/signal.c spin_lock_irq(¤t->sighand->siglock); current 2145 kernel/signal.c if (sigkill_pending(current)) current 2171 kernel/signal.c current->last_siginfo = info; current 2172 kernel/signal.c current->exit_code = exit_code; current 2181 kernel/signal.c if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) current 2182 kernel/signal.c gstop_done = task_participate_group_stop(current); current 2185 kernel/signal.c task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); current 2187 kernel/signal.c task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); current 2190 kernel/signal.c task_clear_jobctl_trapping(current); current 2192 kernel/signal.c spin_unlock_irq(¤t->sighand->siglock); current 2205 kernel/signal.c do_notify_parent_cldstop(current, true, why); current 2206 kernel/signal.c if (gstop_done && ptrace_reparented(current)) current 2207 kernel/signal.c do_notify_parent_cldstop(current, false, why); current 2233 kernel/signal.c do_notify_parent_cldstop(current, false, why); current 2238 kernel/signal.c current->exit_code = 0; current 2247 kernel/signal.c spin_lock_irq(¤t->sighand->siglock); current 2248 kernel/signal.c current->last_siginfo = NULL; current 2251 kernel/signal.c current->jobctl &= ~JOBCTL_LISTENING; current 2258 kernel/signal.c recalc_sigpending_tsk(current); current 2268 kernel/signal.c info.si_pid = task_pid_vnr(current); current 2278 kernel/signal.c if (unlikely(current->task_works)) current 2281 kernel/signal.c spin_lock_irq(¤t->sighand->siglock); current 2283 kernel/signal.c spin_unlock_irq(¤t->sighand->siglock); current 2309 kernel/signal.c __releases(¤t->sighand->siglock) current 2311 kernel/signal.c struct signal_struct *sig = current->signal; current 2313 kernel/signal.c if (!(current->jobctl & JOBCTL_STOP_PENDING)) { current 2320 kernel/signal.c if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || current 2347 kernel/signal.c if (task_set_jobctl_pending(current, signr | gstop)) current 2350 kernel/signal.c t = current; current 2351 kernel/signal.c while_each_thread(current, t) { current 2368 kernel/signal.c if (likely(!current->ptrace)) { current 2376 kernel/signal.c if (task_participate_group_stop(current)) current 2380 kernel/signal.c spin_unlock_irq(¤t->sighand->siglock); current 2393 kernel/signal.c do_notify_parent_cldstop(current, false, notify); current 2406 kernel/signal.c task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); current 2428 kernel/signal.c struct signal_struct *signal = current->signal; current 2429 kernel/signal.c int signr = current->jobctl & JOBCTL_STOP_SIGMASK; current 2431 kernel/signal.c if (current->ptrace & PT_SEIZED) { current 2441 kernel/signal.c current->exit_code = 0; current 2456 kernel/signal.c __releases(¤t->sighand->siglock) current 2463 kernel/signal.c if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) != current 2465 kernel/signal.c spin_unlock_irq(¤t->sighand->siglock); current 2477 kernel/signal.c spin_unlock_irq(¤t->sighand->siglock); current 2493 kernel/signal.c current->jobctl |= JOBCTL_STOP_DEQUEUED; current 2497 kernel/signal.c signr = current->exit_code; current 2501 kernel/signal.c current->exit_code = 0; current 2515 kernel/signal.c info->si_pid = task_pid_vnr(current->parent); current 2517 kernel/signal.c task_uid(current->parent)); current 2522 kernel/signal.c if (sigismember(¤t->blocked, signr)) { current 2523 kernel/signal.c send_signal(signr, info, current, PIDTYPE_PID); current 2532 kernel/signal.c struct sighand_struct *sighand = current->sighand; current 2533 kernel/signal.c struct signal_struct *signal = current->signal; current 2536 kernel/signal.c if (unlikely(current->task_works)) current 2577 kernel/signal.c do_notify_parent_cldstop(current, false, why); current 2579 kernel/signal.c if (ptrace_reparented(current->group_leader)) current 2580 kernel/signal.c do_notify_parent_cldstop(current->group_leader, current 2590 kernel/signal.c sigdelset(¤t->pending.signal, SIGKILL); current 2600 kernel/signal.c if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && current 2604 kernel/signal.c if (unlikely(current->jobctl & current 2606 kernel/signal.c if (current->jobctl & JOBCTL_TRAP_MASK) { current 2609 kernel/signal.c } else if (current->jobctl & JOBCTL_TRAP_FREEZE) current 2619 kernel/signal.c if (unlikely(cgroup_task_frozen(current))) { current 2633 kernel/signal.c signr = dequeue_signal(current, ¤t->blocked, &ksig->info); current 2638 kernel/signal.c if (unlikely(current->ptrace) && signr != SIGKILL) { current 2717 kernel/signal.c if (unlikely(cgroup_task_frozen(current))) current 2723 kernel/signal.c current->flags |= PF_SIGNALED; current 2728 kernel/signal.c proc_coredump_connector(current); current 2772 kernel/signal.c sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask); current 2878 kernel/signal.c struct restart_block *restart = ¤t->restart_block; current 2892 kernel/signal.c sigandnsets(&newblocked, newset, ¤t->blocked); current 2914 kernel/signal.c struct task_struct *tsk = current; current 2938 kernel/signal.c struct task_struct *tsk = current; current 2985 kernel/signal.c current->saved_sigmask = current->blocked; current 3005 kernel/signal.c current->saved_sigmask = current->blocked; current 3029 kernel/signal.c old_set = current->blocked; current 3053 kernel/signal.c sigset_t old_set = current->blocked; current 3076 kernel/signal.c spin_lock_irq(¤t->sighand->siglock); current 3077 kernel/signal.c sigorsets(set, ¤t->pending.signal, current 3078 kernel/signal.c ¤t->signal->shared_pending.signal); current 3079 kernel/signal.c spin_unlock_irq(¤t->sighand->siglock); current 3082 kernel/signal.c sigandsets(set, ¤t->blocked, set); current 3445 kernel/signal.c struct task_struct *tsk = current; current 3634 kernel/signal.c info->si_pid = task_tgid_vnr(current); current 3659 kernel/signal.c struct pid_namespace *active = task_active_pid_ns(current); current 3755 kernel/signal.c if ((task_pid(current) != pid) && current 3807 kernel/signal.c info.si_pid = task_tgid_vnr(current); current 3854 kernel/signal.c (task_pid_vnr(current) != pid)) current 3901 kernel/signal.c (task_pid_vnr(current) != pid)) current 3937 kernel/signal.c spin_lock_irq(¤t->sighand->siglock); current 3938 kernel/signal.c current->sighand->action[sig - 1].sa.sa_handler = action; current 3945 kernel/signal.c flush_sigqueue_mask(&mask, ¤t->signal->shared_pending); current 3946 kernel/signal.c flush_sigqueue_mask(&mask, ¤t->pending); current 3949 kernel/signal.c spin_unlock_irq(¤t->sighand->siglock); current 3960 kernel/signal.c struct task_struct *p = current, *t; current 4007 kernel/signal.c struct task_struct *t = current; current 4014 kernel/signal.c (current->sas_ss_flags & SS_FLAG_BITS); current 4073 kernel/signal.c struct task_struct *t = current; current 4131 kernel/signal.c struct task_struct *t = current; current 4195 kernel/signal.c old_set = current->blocked.sig[0]; current 4201 kernel/signal.c new_blocked = current->blocked; current 4394 kernel/signal.c return current->blocked.sig[0]; current 4399 kernel/signal.c int old = current->blocked.sig[0]; current 4432 kernel/signal.c while (!signal_pending(current)) { current 4443 kernel/signal.c current->saved_sigmask = current->blocked; current 4446 kernel/signal.c while (!signal_pending(current)) { current 42 kernel/smpboot.c per_cpu(idle_threads, smp_processor_id()) = current; current 134 kernel/softirq.c current->preempt_disable_ip = get_lock_parent_ip(); current 227 kernel/softirq.c if (trace_hardirq_context(current)) { current 252 kernel/softirq.c unsigned long old_flags = current->flags; current 264 kernel/softirq.c current->flags &= ~PF_MEMALLOC; current 267 kernel/softirq.c account_irq_enter_time(current); current 304 kernel/softirq.c if (__this_cpu_read(ksoftirqd) == current) current 318 kernel/softirq.c account_irq_exit_time(current); current 348 kernel/softirq.c if (is_idle_task(current) && !in_interrupt()) { current 410 kernel/softirq.c account_irq_exit_time(current); current 54 kernel/stackleak.c unsigned long kstack_ptr = current->lowest_stack; current 55 kernel/stackleak.c unsigned long boundary = (unsigned long)end_of_stack(current); current 84 kernel/stackleak.c current->prev_lowest_stack = kstack_ptr; current 103 kernel/stackleak.c current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64; current 128 kernel/stackleak.c if (sp < current->lowest_stack && current 129 kernel/stackleak.c sp >= (unsigned long)task_stack_page(current) + current 131 kernel/stackleak.c current->lowest_stack = sp; current 123 kernel/stacktrace.c arch_stack_walk(consume_entry, &c, current, NULL); current 145 kernel/stacktrace.c .skip = skipnr + !!(current == tsk), current 175 kernel/stacktrace.c arch_stack_walk(consume_entry, &c, current, regs); current 233 kernel/stacktrace.c if (current->flags & PF_KTHREAD) current 238 kernel/stacktrace.c arch_stack_walk_user(consume_entry, &c, task_pt_regs(current)); current 303 kernel/stacktrace.c .skip = skipnr + !!(current == task), current 222 kernel/sys.c p = current; current 230 kernel/sys.c pgrp = task_pgrp(current); current 285 kernel/sys.c p = current; current 296 kernel/sys.c pgrp = task_pgrp(current); current 469 kernel/sys.c current->flags |= PF_NPROC_EXCEEDED; current 471 kernel/sys.c current->flags &= ~PF_NPROC_EXCEEDED; current 893 kernel/sys.c return task_tgid_vnr(current); current 899 kernel/sys.c return task_pid_vnr(current); current 913 kernel/sys.c pid = task_tgid_vnr(rcu_dereference(current->real_parent)); current 947 kernel/sys.c thread_group_cputime_adjusted(current, &tgutime, &tgstime); current 948 kernel/sys.c cutime = current->signal->cutime; current 949 kernel/sys.c cstime = current->signal->cstime; current 1009 kernel/sys.c struct task_struct *group_leader = current->group_leader; current 1085 kernel/sys.c grp = task_pgrp(current); current 1127 kernel/sys.c sid = task_session(current); current 1149 kernel/sys.c struct task_struct *curr = current->group_leader; current 1160 kernel/sys.c struct task_struct *group_leader = current->group_leader; current 1200 kernel/sys.c (personality(current->personality) == PER_LINUX32 && \ current 1217 kernel/sys.c if (current->personality & UNAME26) { current 1310 kernel/sys.c if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) current 1363 kernel/sys.c if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) current 1388 kernel/sys.c ret = do_prlimit(current, resource, NULL, &value); current 1414 kernel/sys.c return do_prlimit(current, resource, &r, NULL); current 1423 kernel/sys.c ret = do_prlimit(current, resource, NULL, &r); current 1456 kernel/sys.c task_lock(current->group_leader); current 1457 kernel/sys.c x = current->signal->rlim[resource]; current 1458 kernel/sys.c task_unlock(current->group_leader); current 1476 kernel/sys.c task_lock(current->group_leader); current 1477 kernel/sys.c r = current->signal->rlim[resource]; current 1478 kernel/sys.c task_unlock(current->group_leader); current 1590 kernel/sys.c if (current == task) current 1627 kernel/sys.c tsk = pid ? find_task_by_vpid(pid) : current; current 1659 kernel/sys.c return do_prlimit(current, resource, &new_rlim, NULL); current 1716 kernel/sys.c task_cputime_adjusted(current, &utime, &stime); current 1788 kernel/sys.c getrusage(current, who, &r); current 1801 kernel/sys.c getrusage(current, who, &r); current 1808 kernel/sys.c mask = xchg(¤t->fs->umask, mask & S_IRWXUGO); current 1953 kernel/sys.c struct mm_struct *mm = current->mm; current 2077 kernel/sys.c task_lock(current); current 2079 kernel/sys.c task_unlock(current); current 2087 kernel/sys.c struct mm_struct *mm = current->mm; current 2265 kernel/sys.c struct task_struct *me = current; current 2359 kernel/sys.c if (current->timer_slack_ns > ULONG_MAX) current 2362 kernel/sys.c error = current->timer_slack_ns; current 2366 kernel/sys.c current->timer_slack_ns = current 2367 kernel/sys.c current->default_timer_slack_ns; current 2369 kernel/sys.c current->timer_slack_ns = arg2; current 2378 kernel/sys.c current->flags &= ~PF_MCE_PROCESS; current 2381 kernel/sys.c current->flags |= PF_MCE_PROCESS; current 2383 kernel/sys.c current->flags |= PF_MCE_EARLY; current 2385 kernel/sys.c current->flags &= ~PF_MCE_EARLY; current 2387 kernel/sys.c current->flags &= current 2399 kernel/sys.c if (current->flags & PF_MCE_PROCESS) current 2400 kernel/sys.c error = (current->flags & PF_MCE_EARLY) ? current 2426 kernel/sys.c task_set_no_new_privs(current); current 2431 kernel/sys.c return task_no_new_privs(current) ? 1 : 0; current 2072 kernel/sysctl.c current->comm, table->procname); current 1232 kernel/sysctl_binary.c struct net *net = current->nsproxy->net_ns; current 1305 kernel/sysctl_binary.c mnt = task_active_pid_ns(current)->proc_mnt; current 1346 kernel/sysctl_binary.c "system call with ", current->comm); current 92 kernel/task_work.c struct task_struct *task = current; current 190 kernel/taskstats.c fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats); current 291 kernel/taskstats.c if (task_active_pid_ns(current) != &init_pid_ns) current 735 kernel/time/alarmtimer.c alarm->data = (void *)current; current 743 kernel/time/alarmtimer.c } while (alarm->data && !signal_pending(current)); current 752 kernel/time/alarmtimer.c if (freezing(current)) current 754 kernel/time/alarmtimer.c restart = ¤t->restart_block; current 809 kernel/time/alarmtimer.c struct restart_block *restart = ¤t->restart_block; current 1822 kernel/time/hrtimer.c if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT)) current 1828 kernel/time/hrtimer.c sl->task = current; current 1879 kernel/time/hrtimer.c } while (t->task && !signal_pending(current)); current 1886 kernel/time/hrtimer.c restart = ¤t->restart_block; current 1921 kernel/time/hrtimer.c slack = current->timer_slack_ns; current 1922 kernel/time/hrtimer.c if (dl_task(current) || rt_task(current)) current 1937 kernel/time/hrtimer.c restart = ¤t->restart_block; current 1959 kernel/time/hrtimer.c current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; current 1960 kernel/time/hrtimer.c current->restart_block.nanosleep.rmtp = rmtp; current 1979 kernel/time/hrtimer.c current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; current 1980 kernel/time/hrtimer.c current->restart_block.nanosleep.compat_rmtp = rmtp; current 78 kernel/time/itimer.c struct task_struct *tsk = current; current 187 kernel/time/itimer.c struct task_struct *tsk = current; current 300 kernel/time/itimer.c current->comm); current 60 kernel/time/posix-cpu-timers.c return thread ? current : current->group_leader; current 67 kernel/time/posix-cpu-timers.c return same_thread_group(p, current) ? p : NULL; current 79 kernel/time/posix-cpu-timers.c return (p == current || thread_group_leader(p)) ? p : NULL; current 815 kernel/time/posix-cpu-timers.c current->comm, task_pid_nr(current)); current 817 kernel/time/posix-cpu-timers.c __group_send_sig_info(signo, SEND_SIG_PRIV, current); current 1115 kernel/time/posix-cpu-timers.c struct task_struct *tsk = current; current 1235 kernel/time/posix-cpu-timers.c timer.it_process = current; current 1251 kernel/time/posix-cpu-timers.c while (!signal_pending(current)) { current 1306 kernel/time/posix-cpu-timers.c restart = ¤t->restart_block; current 1320 kernel/time/posix-cpu-timers.c struct restart_block *restart_block = ¤t->restart_block; current 1328 kernel/time/posix-cpu-timers.c CPUCLOCK_PID(which_clock) == task_pid_vnr(current))) current 28 kernel/time/posix-stubs.c current->pid, current->comm); current 145 kernel/time/posix-stubs.c current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; current 146 kernel/time/posix-stubs.c current->restart_block.nanosleep.rmtp = rmtp; current 231 kernel/time/posix-stubs.c current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; current 232 kernel/time/posix-stubs.c current->restart_block.nanosleep.compat_rmtp = rmtp; current 132 kernel/time/posix-timers.c struct signal_struct *sig = current->signal; current 140 kernel/time/posix-timers.c struct signal_struct *sig = current->signal; current 407 kernel/time/posix-timers.c struct pid *pid = task_tgid(current); current 414 kernel/time/posix-timers.c if (!rtn || !same_thread_group(rtn, current)) current 518 kernel/time/posix-timers.c new_timer->it_pid = get_pid(task_tgid(current)); current 534 kernel/time/posix-timers.c spin_lock_irq(¤t->sighand->siglock); current 535 kernel/time/posix-timers.c new_timer->it_signal = current->signal; current 536 kernel/time/posix-timers.c list_add(&new_timer->list, ¤t->signal->posix_timers); current 537 kernel/time/posix-timers.c spin_unlock_irq(¤t->sighand->siglock); current 603 kernel/time/posix-timers.c if (timr->it_signal == current->signal) { current 999 kernel/time/posix-timers.c spin_lock(¤t->sighand->siglock); current 1001 kernel/time/posix-timers.c spin_unlock(¤t->sighand->siglock); current 1227 kernel/time/posix-timers.c current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE; current 1228 kernel/time/posix-timers.c current->restart_block.nanosleep.rmtp = rmtp; current 1254 kernel/time/posix-timers.c current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; current 1255 kernel/time/posix-timers.c current->restart_block.nanosleep.compat_rmtp = rmtp; current 161 kernel/time/tick-sched.c if (is_idle_task(current)) current 221 kernel/time/tick-sched.c if (check_tick_dependency(¤t->tick_dep_mask)) current 224 kernel/time/tick-sched.c if (check_tick_dependency(¤t->signal->tick_dep_mask)) current 389 kernel/time/tick-sched.c if (atomic_read(¤t->tick_dep_mask) || current 390 kernel/time/tick-sched.c atomic_read(¤t->signal->tick_dep_mask)) current 1721 kernel/time/timer.c struct task_struct *p = current; current 1885 kernel/time/timer.c current->state = TASK_RUNNING; current 1892 kernel/time/timer.c timer.task = current; current 2061 kernel/time/timer.c while (timeout && !signal_pending(current)) current 217 kernel/trace/blktrace.c struct task_struct *tsk = current; current 250 kernel/trace/blktrace.c tracing_record_cmdline(current); current 183 kernel/trace/bpf_trace.c current->flags & (PF_KTHREAD | PF_EXITING))) current 205 kernel/trace/bpf_trace.c current->comm, task_pid_nr(current)); current 554 kernel/trace/bpf_trace.c return (long) current; current 575 kernel/trace/bpf_trace.c return task_under_cgroup_hierarchy(current, cgrp); current 646 kernel/trace/bpf_trace.c if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING))) current 668 kernel/trace/bpf_trace.c work->task = current; current 674 kernel/trace/bpf_trace.c return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID); current 68 kernel/trace/fgraph.c if (!current->ret_stack) current 78 kernel/trace/fgraph.c if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { current 79 kernel/trace/fgraph.c atomic_inc(¤t->trace_overrun); current 85 kernel/trace/fgraph.c index = ++current->curr_ret_stack; current 87 kernel/trace/fgraph.c current->ret_stack[index].ret = ret; current 88 kernel/trace/fgraph.c current->ret_stack[index].func = func; current 89 kernel/trace/fgraph.c current->ret_stack[index].calltime = calltime; current 91 kernel/trace/fgraph.c current->ret_stack[index].fp = frame_pointer; current 94 kernel/trace/fgraph.c current->ret_stack[index].retp = retp; current 105 kernel/trace/fgraph.c trace.depth = ++current->curr_ret_depth; current 116 kernel/trace/fgraph.c current->curr_ret_stack--; current 118 kernel/trace/fgraph.c current->curr_ret_depth--; current 129 kernel/trace/fgraph.c index = current->curr_ret_stack; current 154 kernel/trace/fgraph.c if (unlikely(current->ret_stack[index].fp != frame_pointer)) { current 158 kernel/trace/fgraph.c current->ret_stack[index].fp, current 160 kernel/trace/fgraph.c (void *)current->ret_stack[index].func, current 161 kernel/trace/fgraph.c current->ret_stack[index].ret); current 167 kernel/trace/fgraph.c *ret = current->ret_stack[index].ret; current 168 kernel/trace/fgraph.c trace->func = current->ret_stack[index].func; current 169 kernel/trace/fgraph.c trace->calltime = current->ret_stack[index].calltime; current 170 kernel/trace/fgraph.c trace->overrun = atomic_read(¤t->trace_overrun); current 171 kernel/trace/fgraph.c trace->depth = current->curr_ret_depth--; current 223 kernel/trace/fgraph.c current->curr_ret_stack--; current 803 kernel/trace/ftrace.c if (!current->ret_stack) current 806 kernel/trace/ftrace.c ret_stack = ftrace_graph_get_ret_stack(current, 0); current 835 kernel/trace/ftrace.c ret_stack = ftrace_graph_get_ret_stack(current, 1); current 839 kernel/trace/ftrace.c ret_stack = ftrace_graph_get_ret_stack(current, 0); current 6611 kernel/trace/ftrace.c trace_ignore_this_task(pid_list, current)); current 631 kernel/trace/ring_buffer.c if (signal_pending(current)) { current 1192 kernel/trace/ring_buffer.c bool user_thread = current->mm != NULL; current 1241 kernel/trace/ring_buffer.c if (user_thread && fatal_signal_pending(current)) current 945 kernel/trace/trace.c update_max_tr(tr, current, smp_processor_id(), cond_data); current 1528 kernel/trace/trace.c if (tsk == current) current 2343 kernel/trace/trace.c struct task_struct *tsk = current; current 2971 kernel/trace/trace.c entry->tgid = current->tgid; current 6784 kernel/trace/trace.c update_max_tr(tr, current, smp_processor_id(), NULL); current 6786 kernel/trace/trace.c update_max_tr_single(tr, current, iter->cpu_file); current 597 kernel/trace/trace.h #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) current 598 kernel/trace/trace.h #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) current 599 kernel/trace/trace.h #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) current 602 kernel/trace/trace.h (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3) current 605 kernel/trace/trace.h current->trace_recursion &= \ current 607 kernel/trace/trace.h current->trace_recursion |= \ current 641 kernel/trace/trace.h unsigned int val = current->trace_recursion; current 653 kernel/trace/trace.h current->trace_recursion = val; current 661 kernel/trace/trace.h unsigned int val = current->trace_recursion; current 670 kernel/trace/trace.h current->trace_recursion = val; current 43 kernel/trace/trace_branch.c if (current->trace_recursion & TRACE_BRANCH_BIT) current 57 kernel/trace/trace_branch.c current->trace_recursion |= TRACE_BRANCH_BIT; current 89 kernel/trace/trace_branch.c current->trace_recursion &= ~TRACE_BRANCH_BIT; current 597 kernel/trace/trace_events.c trace_ignore_this_task(pid_list, current)); current 1571 kernel/trace/trace_events.c trace_ignore_this_task(pid_list, current)); current 740 kernel/trace/trace_events_filter.c cmp = pred->regex.match(current->comm, &pred->regex, current 2329 kernel/trace/trace_events_hist.c save_comm(elt_data->comm, current); current 282 kernel/trace/trace_hwlat.c if (!cpumask_equal(current_mask, current->cpus_ptr)) current 355 kernel/trace/trace_irqsoff.c update_max_tr_single(tr, current, cpu); current 1152 kernel/trace/trace_kprobe.c val = (unsigned long)current->comm; current 47 kernel/trace/trace_sched_switch.c tracing_record_taskinfo(current, flags); current 538 kernel/trace/trace_sched_wakeup.c tracing_record_cmdline(current); current 550 kernel/trace/trace_sched_wakeup.c (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio))) current 588 kernel/trace/trace_sched_wakeup.c tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); current 1058 kernel/trace/trace_selftest.c sched_setattr(current, &attr); current 276 kernel/trace/trace_stack.c if (task_stack_end_corrupted(current)) { current 322 kernel/trace/trace_syscalls.c syscall_nr = trace_get_syscall_nr(current, regs); current 351 kernel/trace/trace_syscalls.c syscall_get_arguments(current, regs, args); current 370 kernel/trace/trace_syscalls.c syscall_nr = trace_get_syscall_nr(current, regs); current 398 kernel/trace/trace_syscalls.c entry->ret = syscall_get_return_value(current, regs); current 594 kernel/trace/trace_syscalls.c syscall_nr = trace_get_syscall_nr(current, regs); current 619 kernel/trace/trace_syscalls.c syscall_get_arguments(current, regs, args); current 693 kernel/trace/trace_syscalls.c syscall_nr = trace_get_syscall_nr(current, regs); current 717 kernel/trace/trace_syscalls.c rec->ret = syscall_get_return_value(current, regs); current 161 kernel/trace/trace_uprobe.c ret = strlcpy(dst, current->comm, maxlen); current 194 kernel/trace/trace_uprobe.c len = strlen(current->comm) + 1; current 212 kernel/trace/trace_uprobe.c udd = (void *) current->utask->vaddr; current 1382 kernel/trace/trace_uprobe.c if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) current 1470 kernel/trace/trace_uprobe.c current->utask->vaddr = (unsigned long) &udd; current 1505 kernel/trace/trace_uprobe.c current->utask->vaddr = (unsigned long) &udd; current 74 kernel/umh.c spin_lock_irq(¤t->sighand->siglock); current 75 kernel/umh.c flush_signal_handlers(current, 1); current 76 kernel/umh.c spin_unlock_irq(¤t->sighand->siglock); current 82 kernel/umh.c set_user_nice(current, 0); current 85 kernel/umh.c new = prepare_kernel_cred(current); current 105 kernel/umh.c sub_info->pid = task_pid_nr(current); current 110 kernel/umh.c current->flags |= PF_UMH; current 17 kernel/user-return-notifier.c set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY); current 30 kernel/user-return-notifier.c clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY); current 1268 kernel/user_namespace.c if (!thread_group_empty(current)) current 1271 kernel/user_namespace.c if (current->fs->users != 1) current 22 kernel/utsname_sysctl.c uts_ns = current->nsproxy->uts_ns; current 431 kernel/watchdog.c current) { current 451 kernel/watchdog.c current->comm, task_pid_nr(current)); current 452 kernel/watchdog.c __this_cpu_write(softlockup_task_ptr_saved, current); current 454 kernel/watchdog.c print_irqtrace_events(current); current 141 kernel/watchdog_hld.c print_irqtrace_events(current); current 955 kernel/workqueue.c WARN_ON_ONCE(worker->task != current); current 981 kernel/workqueue.c WARN_ON_ONCE(worker->task != current); current 2281 kernel/workqueue.c if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { current 2284 kernel/workqueue.c current->comm, preempt_count(), task_pid_nr(current), current 2286 kernel/workqueue.c debug_show_held_locks(current); current 2342 kernel/workqueue.c current->flags |= PF_WQ_WORKER; current 2344 kernel/workqueue.c current->flags &= ~PF_WQ_WORKER; current 2471 kernel/workqueue.c set_user_nice(current, RESCUER_NICE_LEVEL); current 2607 kernel/workqueue.c WARN_ONCE(current->flags & PF_MEMALLOC, current 2609 kernel/workqueue.c current->pid, current->comm, target_wq->name, target_func); current 2671 kernel/workqueue.c barr->task = current; current 67 kernel/workqueue_internal.h if (in_task() && (current->flags & PF_WQ_WORKER)) current 68 kernel/workqueue_internal.h return kthread_data(current); current 523 lib/debugobjects.c task_stack_page(current)); current 526 lib/debugobjects.c task_stack_page(current)); current 48 lib/dump_stack.c log_lvl, raw_smp_processor_id(), current->pid, current->comm, current 59 lib/dump_stack.c print_worker_info(log_lvl, current); current 533 lib/dynamic_debug.c task_pid_vnr(current)); current 106 lib/fault-inject.c unsigned int fail_nth = READ_ONCE(current->fail_nth); current 109 lib/fault-inject.c if (!WRITE_ONCE(current->fail_nth, fail_nth - 1)) current 120 lib/fault-inject.c if (attr->task_filter && !fail_task(attr, current)) current 17 lib/is_single_threaded.c struct task_struct *task = current; current 243 lib/klist.c waiter.process = current; current 1173 lib/locking-selftest.c current->softirqs_enabled = 0; current 1175 lib/locking-selftest.c current->softirqs_enabled = 1; current 1992 lib/locking-selftest.c lockdep_set_selftest_task(current); current 179 lib/nlattr.c current->comm, type); current 394 lib/nlattr.c rem, current->comm); current 26 lib/smp_processor_id.c if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu))) current 44 lib/smp_processor_id.c what1, what2, preempt_count() - 1, current->comm, current->pid); current 67 lib/syscall.c if (target == current) current 391 lib/test_vmalloc.c if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0) current 57 lib/ubsan.c return current->in_ubsan || was_reported(loc); current 145 lib/ubsan.c current->in_ubsan++; current 158 lib/ubsan.c current->in_ubsan--; current 814 lib/vsprintf.c if (!has_capability_noaudit(current, CAP_SYSLOG) || current 520 mm/compaction.c if (fatal_signal_pending(current)) { current 808 mm/compaction.c if (fatal_signal_pending(current)) current 1933 mm/compaction.c if (cc->contended || fatal_signal_pending(current)) current 2314 mm/compaction.c current->capture_control = &capc; current 2322 mm/compaction.c current->capture_control = NULL; current 2400 mm/compaction.c || fatal_signal_pending(current)) current 2631 mm/compaction.c struct task_struct *tsk = current; current 177 mm/filemap.c current->comm, page_to_pfn(page)); current 866 mm/filemap.c error = mem_cgroup_try_charge(page, current->mm, current 1185 mm/filemap.c if (signal_pending_state(state, current)) { current 2042 mm/filemap.c if (fatal_signal_pending(current)) { current 2914 mm/filemap.c send_sig(SIGXFSZ, current, 0); current 3279 mm/filemap.c if (fatal_signal_pending(current)) { current 3358 mm/filemap.c current->backing_dev_info = inode_to_bdi(inode); current 3418 mm/filemap.c current->backing_dev_info = NULL; current 37 mm/frame_vector.c struct mm_struct *mm = current->mm; current 401 mm/frontswap.c if (security_vm_enough_memory_mm(current->mm, pages)) { current 852 mm/gup.c if (fatal_signal_pending(current)) { current 1238 mm/gup.c return __get_user_pages(current, mm, start, nr_pages, gup_flags, current 1251 mm/gup.c struct mm_struct *mm = current->mm; current 1322 mm/gup.c if (__get_user_pages(current, current->mm, addr, 1, current 1626 mm/gup.c return __gup_longterm_locked(current, current->mm, start, nr_pages, current 1665 mm/gup.c return __get_user_pages_locked(current, current->mm, start, nr_pages, current 1689 mm/gup.c struct mm_struct *mm = current->mm; current 1703 mm/gup.c ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, current 2289 mm/gup.c pgdp = pgd_offset(current->mm, addr); current 2394 mm/gup.c down_read(¤t->mm->mmap_sem); current 2395 mm/gup.c ret = __gup_longterm_locked(current, current->mm, current 2398 mm/gup.c up_read(¤t->mm->mmap_sem); current 238 mm/highmem.c DECLARE_WAITQUEUE(wait, current); current 537 mm/huge_memory.c ret = current->mm->get_unmapped_area(filp, addr, len_pad, current 571 mm/huge_memory.c return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); current 2512 mm/hugetlb.c if (signal_pending(current)) current 3929 mm/hugetlb.c current->pid); current 4408 mm/hugetlb.c if (fatal_signal_pending(current)) { current 198 mm/hugetlb_cgroup.c h_cg = hugetlb_cgroup_from_task(current); current 76 mm/kasan/common.c track->pid = current->pid; current 82 mm/kasan/common.c current->kasan_depth++; current 87 mm/kasan/common.c current->kasan_depth--; current 75 mm/kasan/report.c info->access_addr, current->comm, task_pid_nr(current)); current 313 mm/kasan/report.c shadow_bottom = kasan_mem_to_shadow(end_of_stack(current)); current 361 mm/kasan/report.c addr, current->comm, task_pid_nr(current), offset); current 451 mm/kasan/report.c if (current->kasan_depth) current 2108 mm/khugepaged.c set_user_nice(current, MAX_NICE); current 607 mm/kmemleak.c object->pid = current->pid; current 614 mm/kmemleak.c strncpy(object->comm, current->comm, sizeof(object->comm)); current 1215 mm/kmemleak.c if (current->mm) current 1216 mm/kmemleak.c return signal_pending(current); current 1554 mm/kmemleak.c set_user_nice(current, 10); current 852 mm/ksm.c if (signal_pending(current)) current 2392 mm/ksm.c while (scan_npages-- && likely(!freezing(current))) { current 2412 mm/ksm.c set_user_nice(current, 5); current 291 mm/madvise.c up_read(¤t->mm->mmap_sem); current 296 mm/madvise.c down_read(¤t->mm->mmap_sem); current 314 mm/madvise.c if (fatal_signal_pending(current)) current 683 mm/madvise.c if (current->mm == mm) current 772 mm/madvise.c down_read(¤t->mm->mmap_sem); current 773 mm/madvise.c vma = find_vma(current->mm, start); current 854 mm/madvise.c up_read(¤t->mm->mmap_sem); current 860 mm/madvise.c down_read(¤t->mm->mmap_sem); current 1091 mm/madvise.c if (down_write_killable(¤t->mm->mmap_sem)) current 1094 mm/madvise.c down_read(¤t->mm->mmap_sem); current 1102 mm/madvise.c vma = find_vma_prev(current->mm, start, &prev); current 1139 mm/madvise.c vma = find_vma(current->mm, start); current 1144 mm/madvise.c up_write(¤t->mm->mmap_sem); current 1146 mm/madvise.c up_read(¤t->mm->mmap_sem); current 251 mm/memcontrol.c return tsk_is_oom_victim(current) || fatal_signal_pending(current) || current 252 mm/memcontrol.c (current->flags & PF_EXITING); current 1009 mm/memcontrol.c if (unlikely(current->active_memcg)) { current 1013 mm/memcontrol.c if (css_tryget_online(¤t->active_memcg->css)) current 1014 mm/memcontrol.c memcg = current->active_memcg; current 1018 mm/memcontrol.c return get_mem_cgroup_from_mm(current->mm); current 1379 mm/memcontrol.c if (mc.moving_task && current != mc.moving_task) { current 1920 mm/memcontrol.c if (!current->in_user_fault) current 1923 mm/memcontrol.c current->memcg_in_oom = memcg; current 1924 mm/memcontrol.c current->memcg_oom_gfp_mask = mask; current 1925 mm/memcontrol.c current->memcg_oom_order = order; current 1968 mm/memcontrol.c struct mem_cgroup *memcg = current->memcg_in_oom; current 1982 mm/memcontrol.c owait.wait.private = current; current 1996 mm/memcontrol.c mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, current 1997 mm/memcontrol.c current->memcg_oom_order); current 2014 mm/memcontrol.c current->memcg_in_oom = NULL; current 2125 mm/memcontrol.c memcg->move_lock_task = current; current 2140 mm/memcontrol.c if (memcg && memcg->move_lock_task == current) { current 2503 mm/memcontrol.c unsigned int nr_pages = current->memcg_nr_pages_over_high; current 2509 mm/memcontrol.c memcg = get_mem_cgroup_from_mm(current->mm); current 2511 mm/memcontrol.c current->memcg_nr_pages_over_high = 0; current 2600 mm/memcontrol.c if (unlikely(current->flags & PF_MEMALLOC)) current 2603 mm/memcontrol.c if (unlikely(task_in_memcg_oom(current))) current 2652 mm/memcontrol.c if (fatal_signal_pending(current)) current 2708 mm/memcontrol.c current->memcg_nr_pages_over_high += batch; current 2709 mm/memcontrol.c set_notify_resume(current); current 2910 mm/memcontrol.c if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) current 2945 mm/memcontrol.c if (unlikely(current->active_memcg)) current 2946 mm/memcontrol.c memcg = current->active_memcg; current 2948 mm/memcontrol.c memcg = mem_cgroup_from_task(current); current 3180 mm/memcontrol.c if (signal_pending(current)) { current 3341 mm/memcontrol.c if (signal_pending(current)) current 5733 mm/memcontrol.c mc.moving_task = current; current 6185 mm/memcontrol.c if (signal_pending(current)) { current 6873 mm/memcontrol.c memcg = mem_cgroup_from_task(current); current 218 mm/memory-failure.c if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) { current 151 mm/memory.c if (current->rss_stat.count[i]) { current 152 mm/memory.c add_mm_counter(mm, i, current->rss_stat.count[i]); current 153 mm/memory.c current->rss_stat.count[i] = 0; current 156 mm/memory.c current->rss_stat.events = 0; current 161 mm/memory.c struct task_struct *task = current; current 175 mm/memory.c if (unlikely(task != current)) current 467 mm/memory.c if (current->mm == mm) current 517 mm/memory.c current->comm, current 4009 mm/memory.c check_sync_rss_stat(current); current 4036 mm/memory.c if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) current 4422 mm/memory.c struct mm_struct *mm = current->mm; current 4465 mm/memory.c if (current->mm) current 4466 mm/memory.c might_lock_read(¤t->mm->mmap_sem); current 1537 mm/memory_hotplug.c if (signal_pending(current)) { current 808 mm/mempolicy.c task_lock(current); current 811 mm/mempolicy.c task_unlock(current); current 815 mm/mempolicy.c old = current->mempolicy; current 816 mm/mempolicy.c current->mempolicy = new; current 818 mm/mempolicy.c current->il_prev = MAX_NUMNODES-1; current 819 mm/mempolicy.c task_unlock(current); current 875 mm/mempolicy.c struct mm_struct *mm = current->mm; current 877 mm/mempolicy.c struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL; current 887 mm/mempolicy.c task_lock(current); current 889 mm/mempolicy.c task_unlock(current); current 930 mm/mempolicy.c } else if (pol == current->mempolicy && current 932 mm/mempolicy.c *policy = next_node_in(current->il_prev, pol->v.nodes); current 952 mm/mempolicy.c task_lock(current); current 954 mm/mempolicy.c task_unlock(current); current 1170 mm/mempolicy.c vma = find_vma(current->mm, start); current 1221 mm/mempolicy.c struct mm_struct *mm = current->mm; current 1275 mm/mempolicy.c task_lock(current); current 1277 mm/mempolicy.c task_unlock(current); current 1487 mm/mempolicy.c task = pid ? find_task_by_vpid(pid) : current; current 1515 mm/mempolicy.c task_nodes = cpuset_mems_allowed(current); current 1745 mm/mempolicy.c pol = get_task_policy(current); current 1767 mm/mempolicy.c pol = get_task_policy(current); current 1829 mm/mempolicy.c struct task_struct *me = current; current 1849 mm/mempolicy.c policy = current->mempolicy; current 1982 mm/mempolicy.c if (!(mask && current->mempolicy)) current 1985 mm/mempolicy.c task_lock(current); current 1986 mm/mempolicy.c mempolicy = current->mempolicy; current 2005 mm/mempolicy.c task_unlock(current); current 2183 mm/mempolicy.c pol = get_task_policy(current); current 2230 mm/mempolicy.c if (old == current->mempolicy) { current 2231 mm/mempolicy.c task_lock(current); current 2233 mm/mempolicy.c task_unlock(current); current 2238 mm/mempolicy.c nodemask_t mems = cpuset_mems_allowed(current); current 2439 mm/mempolicy.c if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) current 2599 mm/mempolicy.c task_lock(current); current 2601 mm/mempolicy.c task_unlock(current); current 1029 mm/migrate.c if (current->flags & PF_MEMALLOC) current 1409 mm/migrate.c int swapwrite = current->flags & PF_SWAPWRITE; current 1413 mm/migrate.c current->flags |= PF_SWAPWRITE; current 1483 mm/migrate.c current->flags &= ~PF_SWAPWRITE; current 1805 mm/migrate.c task = pid ? find_task_by_vpid(pid) : current; current 213 mm/mincore.c vma = find_vma(current->mm, addr); current 286 mm/mincore.c down_read(¤t->mm->mmap_sem); current 288 mm/mincore.c up_read(¤t->mm->mmap_sem); current 530 mm/mlock.c is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || current 597 mm/mlock.c vma = find_vma(current->mm, start); current 646 mm/mlock.c mm = current->mm; current 689 mm/mlock.c if (down_write_killable(¤t->mm->mmap_sem)) current 692 mm/mlock.c locked += current->mm->locked_vm; current 700 mm/mlock.c locked -= count_mm_mlocked_page_nr(current->mm, current 708 mm/mlock.c up_write(¤t->mm->mmap_sem); current 745 mm/mlock.c if (down_write_killable(¤t->mm->mmap_sem)) current 748 mm/mlock.c up_write(¤t->mm->mmap_sem); current 768 mm/mlock.c current->mm->def_flags &= VM_LOCKED_CLEAR_MASK; current 770 mm/mlock.c current->mm->def_flags |= VM_LOCKED; current 773 mm/mlock.c current->mm->def_flags |= VM_LOCKONFAULT; current 785 mm/mlock.c for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { current 814 mm/mlock.c if (down_write_killable(¤t->mm->mmap_sem)) current 818 mm/mlock.c if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || current 821 mm/mlock.c up_write(¤t->mm->mmap_sem); current 832 mm/mlock.c if (down_write_killable(¤t->mm->mmap_sem)) current 835 mm/mlock.c up_write(¤t->mm->mmap_sem); current 191 mm/mmap.c struct mm_struct *mm = current->mm; current 209 mm/mmap.c if (current->brk_randomized) current 1395 mm/mmap.c struct mm_struct *mm = current->mm; current 1409 mm/mmap.c if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) current 1715 mm/mmap.c struct mm_struct *mm = current->mm; current 1835 mm/mmap.c vma == get_gate_vma(current->mm)) current 1887 mm/mmap.c struct mm_struct *mm = current->mm; current 1982 mm/mmap.c struct mm_struct *mm = current->mm; current 2104 mm/mmap.c struct mm_struct *mm = current->mm; current 2144 mm/mmap.c struct mm_struct *mm = current->mm; current 2205 mm/mmap.c get_area = current->mm->get_unmapped_area; current 2852 mm/mmap.c struct mm_struct *mm = current->mm; current 2895 mm/mmap.c struct mm_struct *mm = current->mm; current 2902 mm/mmap.c current->comm, current->pid); current 2994 mm/mmap.c struct mm_struct *mm = current->mm; current 3066 mm/mmap.c struct mm_struct *mm = current->mm; current 3300 mm/mmap.c current->comm, current->pid, current 3342 mm/mmap.c if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) current 3557 mm/mmap.c if (signal_pending(current)) current 3565 mm/mmap.c if (signal_pending(current)) current 3573 mm/mmap.c if (signal_pending(current)) current 25 mm/mmu_context.c struct task_struct *tsk = current; current 55 mm/mmu_context.c struct task_struct *tsk = current; current 399 mm/mprotect.c error = walk_page_range(current->mm, start, end, current 493 mm/mprotect.c const bool rier = (current->personality & READ_IMPLIES_EXEC) && current 515 mm/mprotect.c if (down_write_killable(¤t->mm->mmap_sem)) current 523 mm/mprotect.c if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey)) current 526 mm/mprotect.c vma = find_vma(current->mm, start); current 605 mm/mprotect.c up_write(¤t->mm->mmap_sem); current 635 mm/mprotect.c down_write(¤t->mm->mmap_sem); current 636 mm/mprotect.c pkey = mm_pkey_alloc(current->mm); current 642 mm/mprotect.c ret = arch_set_user_pkey_access(current, pkey, init_val); current 644 mm/mprotect.c mm_pkey_free(current->mm, pkey); current 649 mm/mprotect.c up_write(¤t->mm->mmap_sem); current 657 mm/mprotect.c down_write(¤t->mm->mmap_sem); current 658 mm/mprotect.c ret = mm_pkey_free(current->mm, pkey); current 659 mm/mprotect.c up_write(¤t->mm->mmap_sem); current 436 mm/mremap.c struct mm_struct *mm = current->mm; current 452 mm/mremap.c pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap. This is not supported.\n", current->comm, current->pid); current 504 mm/mremap.c struct mm_struct *mm = current->mm; current 599 mm/mremap.c struct mm_struct *mm = current->mm; current 641 mm/mremap.c if (down_write_killable(¤t->mm->mmap_sem)) current 732 mm/mremap.c up_read(¤t->mm->mmap_sem); current 734 mm/mremap.c up_write(¤t->mm->mmap_sem); current 35 mm/msync.c struct mm_struct *mm = current->mm; current 102 mm/nommu.c vma = find_vma(current->mm, (unsigned long)objp); current 166 mm/nommu.c down_write(¤t->mm->mmap_sem); current 167 mm/nommu.c vma = find_vma(current->mm, (unsigned long)ret); current 170 mm/nommu.c up_write(¤t->mm->mmap_sem); current 416 mm/nommu.c struct mm_struct *mm = current->mm; current 655 mm/nommu.c struct task_struct *curr = current; current 908 mm/nommu.c if (current->personality & READ_IMPLIES_EXEC) { current 927 mm/nommu.c (current->personality & READ_IMPLIES_EXEC)) current 973 mm/nommu.c if ((flags & MAP_PRIVATE) && current->ptrace) current 1090 mm/nommu.c len, current->pid, current->comm); current 1136 mm/nommu.c vma = vm_area_alloc(current->mm); current 1277 mm/nommu.c current->mm->total_vm += len >> PAGE_SHIFT; current 1280 mm/nommu.c add_vma_to_mm(current->mm, vma); current 1313 mm/nommu.c len, current->pid); current 1319 mm/nommu.c len, current->pid); current 1503 mm/nommu.c current->pid, current->comm, current 1547 mm/nommu.c struct mm_struct *mm = current->mm; current 1615 mm/nommu.c vma = find_vma_exact(current->mm, addr, old_len); current 1639 mm/nommu.c down_write(¤t->mm->mmap_sem); current 1641 mm/nommu.c up_write(¤t->mm->mmap_sem); current 110 mm/oom_kill.c ret = cpuset_mems_allowed_intersects(current, tsk); current 455 mm/oom_kill.c current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order, current 456 mm/oom_kill.c current->signal->oom_score_adj); current 1062 mm/oom_kill.c if (task_will_free_mem(current)) { current 1063 mm/oom_kill.c mark_oom_victim(current); current 1064 mm/oom_kill.c wake_oom_reaper(current); current 1088 mm/oom_kill.c current->mm && !oom_unkillable_task(current) && current 1089 mm/oom_kill.c oom_cpuset_eligible(current, oc) && current 1090 mm/oom_kill.c current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { current 1091 mm/oom_kill.c get_task_struct(current); current 1092 mm/oom_kill.c oc->chosen = current; current 438 mm/page-writeback.c tsk = current; current 480 mm/page-writeback.c struct task_struct *tsk = current; current 1659 mm/page-writeback.c current->dirty_paused_when = now; current 1660 mm/page-writeback.c current->nr_dirtied = 0; current 1663 mm/page-writeback.c current->nr_dirtied_pause = min(intv, m_intv); current 1729 mm/page-writeback.c if (current->dirty_paused_when) current 1730 mm/page-writeback.c pause -= now - current->dirty_paused_when; current 1752 mm/page-writeback.c current->dirty_paused_when = now; current 1753 mm/page-writeback.c current->nr_dirtied = 0; current 1755 mm/page-writeback.c current->dirty_paused_when += period; current 1756 mm/page-writeback.c current->nr_dirtied = 0; current 1757 mm/page-writeback.c } else if (current->nr_dirtied_pause <= pages_dirtied) current 1758 mm/page-writeback.c current->nr_dirtied_pause += pages_dirtied; current 1784 mm/page-writeback.c current->dirty_paused_when = now + pause; current 1785 mm/page-writeback.c current->nr_dirtied = 0; current 1786 mm/page-writeback.c current->nr_dirtied_pause = nr_dirtied_pause; current 1808 mm/page-writeback.c if (fatal_signal_pending(current)) current 1880 mm/page-writeback.c ratelimit = current->nr_dirtied_pause; current 1892 mm/page-writeback.c if (unlikely(current->nr_dirtied >= ratelimit)) current 1904 mm/page-writeback.c if (*p > 0 && current->nr_dirtied < ratelimit) { current 1906 mm/page-writeback.c nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); current 1908 mm/page-writeback.c current->nr_dirtied += nr_pages_dirtied; current 1912 mm/page-writeback.c if (unlikely(current->nr_dirtied >= ratelimit)) current 1913 mm/page-writeback.c balance_dirty_pages(wb, current->nr_dirtied); current 2430 mm/page-writeback.c current->nr_dirtied++; current 2514 mm/page-writeback.c current->nr_dirtied--; current 640 mm/page_alloc.c current->comm, page_to_pfn(page)); current 825 mm/page_alloc.c struct capture_control *capc = current->capture_control; current 828 mm/page_alloc.c !(current->flags & PF_KTHREAD) && current 1781 mm/page_alloc.c set_cpus_allowed_ptr(current, cpumask); current 3736 mm/page_alloc.c if (tsk_is_oom_victim(current) || current 3737 mm/page_alloc.c (current->flags & (PF_MEMALLOC | PF_EXITING))) current 3758 mm/page_alloc.c current->comm, &vaf, gfp_mask, &gfp_mask, current 3827 mm/page_alloc.c if (current->flags & PF_DUMPCORE) current 4067 mm/page_alloc.c if (current->flags & PF_MEMALLOC) current 4209 mm/page_alloc.c } else if (unlikely(rt_task(current)) && !in_interrupt()) current 4247 mm/page_alloc.c if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) current 4250 mm/page_alloc.c if (current->flags & PF_MEMALLOC) current 4252 mm/page_alloc.c else if (oom_reserves_allowed(current)) current 4358 mm/page_alloc.c if (current->flags & PF_WQ_WORKER) current 4556 mm/page_alloc.c if (current->flags & PF_MEMALLOC) current 4609 mm/page_alloc.c if (tsk_is_oom_victim(current) && current 4642 mm/page_alloc.c WARN_ON_ONCE(current->flags & PF_MEMALLOC); current 8330 mm/page_alloc.c if (fatal_signal_pending(current)) { current 403 mm/page_io.c get_task_struct(current); current 404 mm/page_io.c bio->bi_private = current; current 584 mm/rmap.c struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; current 597 mm/rmap.c struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; current 605 mm/rmap.c struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; current 172 mm/shmem.c 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size)); current 186 mm/shmem.c return security_vm_enough_memory_mm(current->mm, current 205 mm/shmem.c return security_vm_enough_memory_mm(current->mm, current 1633 mm/shmem.c struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm; current 1768 mm/shmem.c charge_mm = vma ? vma->vm_mm : current->mm; current 2088 mm/shmem.c get_area = current->mm->get_unmapped_area; current 2808 mm/shmem.c if (signal_pending(current)) current 4075 mm/shmem.c return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); current 1398 mm/slab.c if (current->reclaim_state) current 1399 mm/slab.c current->reclaim_state->reclaimed_slab += 1 << order; current 3088 mm/slab.c else if (current->mempolicy) current 3271 mm/slab.c if (current->mempolicy || cpuset_do_slab_mem_spread()) { current 214 mm/slob.c if (current->reclaim_state) current 215 mm/slob.c current->reclaim_state->reclaimed_slab += 1 << order; current 567 mm/slub.c p->pid = current->pid; current 1728 mm/slub.c if (current->reclaim_state) current 1729 mm/slub.c current->reclaim_state->reclaimed_slab += pages; current 2152 mm/swapfile.c !signal_pending(current) && current 2181 mm/swapfile.c !signal_pending(current) && current 2223 mm/swapfile.c if (!signal_pending(current)) current 2531 mm/swapfile.c BUG_ON(!current->mm); current 2557 mm/swapfile.c if (!security_vm_enough_memory_mm(current->mm, p->pages)) current 3754 mm/swapfile.c if (current->throttle_queue) current 36 mm/usercopy.c const void * const stack = task_stack_page(current); current 320 mm/userfaultfd.c if (fatal_signal_pending(current)) current 584 mm/userfaultfd.c if (fatal_signal_pending(current)) current 298 mm/util.c struct task_struct * __maybe_unused t = current; current 311 mm/util.c if (current->flags & PF_RANDOMIZE) { current 349 mm/util.c if (current->personality & ADDR_COMPAT_LAYOUT) current 371 mm/util.c if (current->flags & PF_RANDOMIZE) current 390 mm/util.c if (current->flags & PF_RANDOMIZE) current 475 mm/util.c ret = __account_locked_vm(mm, pages, inc, current, current 488 mm/util.c struct mm_struct *mm = current->mm; current 33 mm/vmacache.c return current->mm == mm && !(current->flags & PF_KTHREAD); current 39 mm/vmacache.c current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma; current 49 mm/vmacache.c curr = current; current 73 mm/vmacache.c struct vm_area_struct *vma = current->vmacache.vmas[idx]; current 106 mm/vmacache.c struct vm_area_struct *vma = current->vmacache.vmas[idx]; current 780 mm/vmscan.c if (current->flags & PF_SWAPWRITE) current 784 mm/vmscan.c if (inode_to_bdi(inode) == current->backing_dev_info) current 1938 mm/vmscan.c return !(current->flags & PF_LESS_THROTTLE) || current 1939 mm/vmscan.c current->backing_dev_info == NULL || current 1940 mm/vmscan.c bdi_write_congested(current->backing_dev_info); current 1971 mm/vmscan.c if (fatal_signal_pending(current)) current 2758 mm/vmscan.c struct reclaim_state *reclaim_state = current->reclaim_state; current 3194 mm/vmscan.c if (current->flags & PF_KTHREAD) current 3201 mm/vmscan.c if (fatal_signal_pending(current)) current 3257 mm/vmscan.c if (fatal_signal_pending(current)) current 3296 mm/vmscan.c set_task_reclaim_state(current, &sc.reclaim_state); current 3302 mm/vmscan.c set_task_reclaim_state(current, NULL); current 3325 mm/vmscan.c WARN_ON_ONCE(!current->reclaim_state); current 3371 mm/vmscan.c set_task_reclaim_state(current, &sc.reclaim_state); current 3392 mm/vmscan.c set_task_reclaim_state(current, NULL); current 3594 mm/vmscan.c set_task_reclaim_state(current, &sc.reclaim_state); current 3775 mm/vmscan.c set_task_reclaim_state(current, NULL); current 3807 mm/vmscan.c if (freezing(current) || kthread_should_stop()) current 3899 mm/vmscan.c struct task_struct *tsk = current; current 4045 mm/vmscan.c set_task_reclaim_state(current, &sc.reclaim_state); current 4049 mm/vmscan.c set_task_reclaim_state(current, NULL); current 4213 mm/vmscan.c struct task_struct *p = current; current 4251 mm/vmscan.c current->flags &= ~PF_SWAPWRITE; current 4281 mm/vmscan.c if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) current 741 net/9p/client.c if (signal_pending(current)) { current 790 net/9p/client.c spin_lock_irqsave(¤t->sighand->siglock, flags); current 792 net/9p/client.c spin_unlock_irqrestore(¤t->sighand->siglock, flags); current 840 net/9p/client.c if (signal_pending(current)) { current 872 net/9p/client.c spin_lock_irqsave(¤t->sighand->siglock, flags); current 874 net/9p/client.c spin_unlock_irqrestore(¤t->sighand->siglock, flags); current 1463 net/9p/client.c __func__, task_pid_nr(current)); current 47 net/9p/mod.c pr_notice("(%8.8d) %pV", task_pid_nr(current), &vaf); current 49 net/9p/mod.c pr_notice("-- %s (%d): %pV", func, task_pid_nr(current), &vaf); current 663 net/9p/trans_fd.c m, current, &req->tc, req->tc.id); current 835 net/9p/trans_fd.c __func__, task_pid_nr(current)); current 965 net/9p/trans_fd.c err = __sock_create(current->nsproxy->net_ns, PF_INET, current 969 net/9p/trans_fd.c __func__, task_pid_nr(current)); current 977 net/9p/trans_fd.c __func__, task_pid_nr(current)); current 988 net/9p/trans_fd.c __func__, task_pid_nr(current), addr); current 1010 net/9p/trans_fd.c __func__, task_pid_nr(current), addr); current 1016 net/9p/trans_fd.c err = __sock_create(current->nsproxy->net_ns, PF_UNIX, current 1020 net/9p/trans_fd.c __func__, task_pid_nr(current)); current 1028 net/9p/trans_fd.c __func__, task_pid_nr(current), addr, err); current 1112 net/9p/trans_fd.c p9_debug(P9_DEBUG_TRANS, "start %p\n", current); current 661 net/9p/trans_rdma.c __func__, task_pid_nr(current), -err); current 1208 net/appletalk/ddp.c current->comm); current 590 net/atm/common.c send_sig(SIGPIPE, current, 0); current 611 net/atm/common.c if (signal_pending(current)) { current 619 net/atm/common.c send_sig(SIGPIPE, current, 0); current 86 net/atm/ioctl.c current->comm, task_pid_nr(current)); current 217 net/atm/svc.c if (!signal_pending(current)) { current 360 net/atm/svc.c if (signal_pending(current)) { current 1224 net/ax25/af_ax25.c current->comm); current 1299 net/ax25/af_ax25.c if (!signal_pending(current)) { current 1370 net/ax25/af_ax25.c if (!signal_pending(current)) { current 1469 net/ax25/af_ax25.c send_sig(SIGPIPE, current, 0); current 1797 net/ax25/af_ax25.c current->comm); current 50 net/batman-adv/debugfs.c current->comm, task_pid_nr(current), name, alt); current 53 net/batman-adv/sysfs.c current->comm, task_pid_nr(current), attr->name); current 302 net/bluetooth/af_bluetooth.c DECLARE_WAITQUEUE(wait, current); current 314 net/bluetooth/af_bluetooth.c if (signal_pending(current) || !timeo) current 368 net/bluetooth/af_bluetooth.c if (signal_pending(current)) { current 536 net/bluetooth/af_bluetooth.c DECLARE_WAITQUEUE(wait, current); current 549 net/bluetooth/af_bluetooth.c if (signal_pending(current)) { current 572 net/bluetooth/af_bluetooth.c DECLARE_WAITQUEUE(wait, current); current 588 net/bluetooth/af_bluetooth.c if (signal_pending(current)) { current 488 net/bluetooth/bnep/core.c set_user_nice(current, -15); current 381 net/bluetooth/cmtp/capi.c DECLARE_WAITQUEUE(wait, current); current 432 net/bluetooth/cmtp/capi.c if (signal_pending(current)) { current 287 net/bluetooth/cmtp/core.c set_user_nice(current, -15); current 97 net/bluetooth/hci_sock.c get_task_comm(hci_pi(sk)->comm, current); current 1275 net/bluetooth/hidp/core.c set_user_nice(current, -15); current 335 net/bluetooth/l2cap_sock.c if (signal_pending(current)) { current 1063 net/bluetooth/l2cap_sock.c DECLARE_WAITQUEUE(wait, current); current 1079 net/bluetooth/l2cap_sock.c if (signal_pending(current)) { current 1173 net/bluetooth/l2cap_sock.c !(current->flags & PF_EXITING)) current 2080 net/bluetooth/rfcomm/core.c set_user_nice(current, -10); current 512 net/bluetooth/rfcomm/sock.c if (signal_pending(current)) { current 928 net/bluetooth/rfcomm/sock.c !(current->flags & PF_EXITING)) current 659 net/bluetooth/sco.c if (signal_pending(current)) { current 987 net/bluetooth/sco.c !(current->flags & PF_EXITING)) current 1011 net/bluetooth/sco.c !(current->flags & PF_EXITING)) { current 46 net/bpf/test_run.c if (signal_pending(current)) { current 286 net/bpf/test_run.c sock_net_set(sk, current->nsproxy->net_ns); current 300 net/bpf/test_run.c skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev); current 365 net/bpf/test_run.c rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0); current 451 net/bpf/test_run.c if (signal_pending(current)) { current 47 net/bpfilter/bpfilter_kern.c req.pid = current->pid; current 322 net/caif/caif_socket.c signal_pending(current) || current 413 net/caif/caif_socket.c if (signal_pending(current)) { current 478 net/caif/caif_socket.c if (signal_pending(current)) current 666 net/caif/caif_socket.c send_sig(SIGPIPE, current, 0); current 144 net/ceph/ceph_common.c if (!net_eq(current->nsproxy->net_ns, read_pnet(&client->msgr.net))) current 1894 net/ceph/messenger.c ip_len = dns_query(current->nsproxy->net_ns, current 3058 net/ceph/messenger.c write_pnet(&msgr->net, get_net(current->nsproxy->net_ns)); current 117 net/core/datagram.c if (signal_pending(current)) current 7680 net/core/dev.c from_kuid(&init_user_ns, audit_get_loginuid(current)), current 7683 net/core/dev.c audit_get_sessionid(current)); current 1875 net/core/ethtool.c } while (!signal_pending(current) && --i != 0); current 1876 net/core/ethtool.c } while (!signal_pending(current) && current 118 net/core/flow_dissector.c net = current->nsproxy->net_ns; current 172 net/core/flow_dissector.c return flow_dissector_bpf_prog_detach(current->nsproxy->net_ns); current 1575 net/core/net-sysfs.c struct net *net = current->nsproxy->net_ns; current 1582 net/core/net-sysfs.c struct net *ns = current->nsproxy->net_ns; current 646 net/core/netpoll.c struct net *net = current->nsproxy->net_ns; current 502 net/core/pktgen.c struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id); current 2181 net/core/pktgen.c } while (t.task && pkt_dev->running && !signal_pending(current)); current 3070 net/core/pktgen.c if (signal_pending(current)) current 3288 net/core/pktgen.c if (signal_pending(current)) current 3473 net/core/pktgen.c pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); current 54 net/core/scm.c if ((creds->pid == task_tgid_vnr(current) || current 55 net/core/scm.c ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) && current 405 net/core/sock.c __func__, current->comm, task_pid_nr(current)); current 421 net/core/sock.c if (strcmp(warncomm, current->comm) && warned < 5) { current 422 net/core/sock.c strcpy(warncomm, current->comm); current 2196 net/core/sock.c if (signal_pending(current)) current 2243 net/core/sock.c if (signal_pending(current)) current 59 net/core/stream.c struct task_struct *tsk = current; current 106 net/core/stream.c } while (!signal_pending(current) && timeout); current 137 net/core/stream.c if (signal_pending(current)) current 188 net/core/stream.c send_sig(SIGPIPE, current, 0); current 232 net/dccp/output.c if (signal_pending(current) || sk->sk_err) current 899 net/dccp/proto.c if (signal_pending(current)) { current 841 net/decnet/af_decnet.c if (signal_pending(current)) current 882 net/decnet/af_decnet.c if (signal_pending(current)) current 1053 net/decnet/af_decnet.c if (signal_pending(current)) current 1734 net/decnet/af_decnet.c if (signal_pending(current)) { current 1952 net/decnet/af_decnet.c send_sig(SIGPIPE, current, 0); current 1979 net/decnet/af_decnet.c if (signal_pending(current)) { current 47 net/dns_resolver/internal.h current->comm, ##__VA_ARGS__); \ current 425 net/ipv4/af_inet.c !(current->flags & PF_EXITING)) current 591 net/ipv4/af_inet.c if (signal_pending(current) || !timeo) current 684 net/ipv4/af_inet.c if (signal_pending(current)) current 429 net/ipv4/inet_connection_sock.c if (signal_pending(current)) current 1629 net/ipv4/ip_output.c ¤t->task_frag, getfrag, current 543 net/ipv4/raw.c __func__, current->comm); current 833 net/ipv4/tcp.c if (signal_pending(current)) { current 849 net/ipv4/tcp.c signal_pending(current)) current 1766 net/ipv4/tcp.c down_read(¤t->mm->mmap_sem); current 1768 net/ipv4/tcp.c vma = find_vma(current->mm, address); current 1770 net/ipv4/tcp.c up_read(¤t->mm->mmap_sem); current 1829 net/ipv4/tcp.c up_read(¤t->mm->mmap_sem); current 2020 net/ipv4/tcp.c if (signal_pending(current)) { current 2064 net/ipv4/tcp.c signal_pending(current)) current 2091 net/ipv4/tcp.c if (signal_pending(current)) { current 2110 net/ipv4/tcp.c current->comm, current 2111 net/ipv4/tcp.c task_pid_nr(current)); current 939 net/ipv4/tcp_output.c if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) current 443 net/ipv6/ip6_flowlabel.c fl->owner.pid = get_task_pid(current, PIDTYPE_PID); current 1875 net/ipv6/ip6_output.c ¤t->task_frag, getfrag, from, current 1827 net/ipv6/ndisc.c if (strcmp(warncomm, current->comm) && warned < 5) { current 1828 net/ipv6/ndisc.c strcpy(warncomm, current->comm); current 66 net/iucv/af_iucv.c if (signal_pending(current)) { \ current 962 net/iucv/af_iucv.c DECLARE_WAITQUEUE(wait, current); current 994 net/iucv/af_iucv.c if (signal_pending(current)) { current 1106 net/kcm/kcmsock.c if (signal_pending(current)) { current 280 net/l2tp/l2tp_debugfs.c pd->net = get_net_ns_by_pid(current->pid); current 556 net/l2tp/l2tp_ppp.c ps->owner = current->pid; current 497 net/llc/af_llc.c if (signal_pending(current)) current 561 net/llc/af_llc.c if (signal_pending(current)) current 580 net/llc/af_llc.c if (signal_pending(current) || !timeout) current 603 net/llc/af_llc.c if (signal_pending(current)) current 631 net/llc/af_llc.c if (signal_pending(current)) current 767 net/llc/af_llc.c if (signal_pending(current)) { current 829 net/llc/af_llc.c current->comm, current 830 net/llc/af_llc.c task_pid_nr(current)); current 1761 net/netfilter/ipvs/ip_vs_sync.c IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); current 1949 net/netfilter/ipvs/ip_vs_sync.c IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); current 2391 net/netfilter/nf_conntrack_core.c if (current->nsproxy->net_ns != &init_net) current 6241 net/netfilter/nf_tables_api.c nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) || current 6242 net/netfilter/nf_tables_api.c nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current))) current 359 net/netfilter/nfnetlink.c if (fatal_signal_pending(current)) { current 1534 net/netlabel/netlabel_unlabeled.c security_task_getsecid(current, &audit_info.secid); current 37 net/netlabel/netlabel_user.h security_task_getsecid(current, &audit_info->secid); current 38 net/netlabel/netlabel_user.h audit_info->loginuid = audit_get_loginuid(current); current 39 net/netlabel/netlabel_user.h audit_info->sessionid = audit_get_sessionid(current); current 428 net/netlink/af_netlink.c DECLARE_WAITQUEUE(wait, current); current 810 net/netlink/af_netlink.c s32 portid = task_tgid_vnr(current); current 1212 net/netlink/af_netlink.c DECLARE_WAITQUEUE(wait, current); current 1233 net/netlink/af_netlink.c if (signal_pending(current)) { current 715 net/netrom/af_netrom.c if (!signal_pending(current)) { current 780 net/netrom/af_netrom.c if (!signal_pending(current)) { current 1031 net/netrom/af_netrom.c send_sig(SIGPIPE, current, 0); current 19 net/nfc/llcp_sock.c DECLARE_WAITQUEUE(wait, current); current 33 net/nfc/llcp_sock.c if (signal_pending(current)) { current 441 net/nfc/llcp_sock.c DECLARE_WAITQUEUE(wait, current); current 467 net/nfc/llcp_sock.c if (signal_pending(current)) { current 1166 net/phonet/pep.c if (signal_pending(current)) { current 223 net/phonet/socket.c struct task_struct *tsk = current; current 682 net/rds/tcp.c struct net *net = current->nsproxy->net_ns; current 823 net/rose/af_rose.c if (!signal_pending(current)) { current 890 net/rose/af_rose.c if (!signal_pending(current)) { current 1046 net/rose/af_rose.c send_sig(SIGPIPE, current, 0); current 1187 net/rxrpc/ar-internal.h printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__) current 649 net/rxrpc/conn_client.c DECLARE_WAITQUEUE(myself, current); current 672 net/rxrpc/conn_client.c signal_pending(current)) { current 487 net/rxrpc/recvmsg.c if (signal_pending(current)) current 50 net/rxrpc/sendmsg.c if (signal_pending(current)) current 90 net/rxrpc/sendmsg.c tx_win == tx_start && signal_pending(current)) current 132 net/rxrpc/sendmsg.c DECLARE_WAITQUEUE(myself, current); current 1568 net/sctp/socket.c send_sig(SIGPIPE, current, 0); current 2811 net/sctp/socket.c current->comm, task_pid_nr(current)); current 3296 net/sctp/socket.c current->comm, task_pid_nr(current)); current 3581 net/sctp/socket.c current->comm, task_pid_nr(current)); current 5654 net/sctp/socket.c if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) current 6093 net/sctp/socket.c current->comm, task_pid_nr(current)); current 6787 net/sctp/socket.c current->comm, task_pid_nr(current)); current 6882 net/sctp/socket.c current->comm, task_pid_nr(current)); current 8818 net/sctp/socket.c if (signal_pending(current)) current 9063 net/sctp/socket.c if (signal_pending(current)) current 9173 net/sctp/socket.c if (signal_pending(current)) current 9241 net/sctp/socket.c if (signal_pending(current)) current 9265 net/sctp/socket.c } while (!signal_pending(current) && timeout); current 329 net/sctp/sysctl.c struct net *net = current->nsproxy->net_ns; current 375 net/sctp/sysctl.c struct net *net = current->nsproxy->net_ns; current 404 net/sctp/sysctl.c struct net *net = current->nsproxy->net_ns; current 444 net/sctp/sysctl.c struct net *net = current->nsproxy->net_ns; current 799 net/smc/af_smc.c else if (signal_pending(current)) current 1440 net/smc/af_smc.c DECLARE_WAITQUEUE(wait, current); current 1469 net/smc/af_smc.c if (signal_pending(current)) { current 295 net/smc/smc_clc.c if (signal_pending(current)) { current 63 net/smc/smc_close.c while (!signal_pending(current) && timeout) { current 187 net/smc/smc_close.c timeout = current->flags & PF_EXITING ? current 438 net/smc/smc_close.c timeout = current->flags & PF_EXITING ? current 334 net/smc/smc_rx.c signal_pending(current)) current 351 net/smc/smc_rx.c if (signal_pending(current)) { current 103 net/smc/smc_tx.c if (signal_pending(current)) { current 1369 net/socket.c current->comm); current 1469 net/socket.c return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); current 694 net/sunrpc/auth_gss/auth_gss.c if (fatal_signal_pending(current)) { current 1834 net/sunrpc/clnt.c if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) { current 289 net/sunrpc/sched.c if (signal_pending_state(mode, current)) current 1074 net/sunrpc/sched.c task->tk_owner = current->tgid; current 1090 net/sunrpc/sched.c task_pid_nr(current)); current 1142 net/sunrpc/svc.c spin_lock_irqsave(¤t->sighand->siglock, flags); current 1144 net/sunrpc/svc.c spin_unlock_irqrestore(¤t->sighand->siglock, flags); current 693 net/sunrpc/svc_xprt.c if (freezing(current)) current 33 net/sysctl_net.c return ¤t->nsproxy->net_ns->sysctls; current 38 net/sysctl_net.c return ¤t->nsproxy->net_ns->sysctls == set; current 374 net/tipc/socket.c if (signal_pending(current)) current 1716 net/tipc/socket.c if (signal_pending(current)) current 2352 net/tipc/socket.c if (signal_pending(current)) current 2513 net/tipc/socket.c if (signal_pending(current)) current 87 net/tls/tls_main.c if (signal_pending(current)) { current 1315 net/tls/tls_sw.c if (signal_pending(current)) { current 594 net/unix/af_unix.c sk->sk_peer_pid = get_pid(task_tgid(current)); current 1285 net/unix/af_unix.c if (signal_pending(current)) current 1543 net/unix/af_unix.c UNIXCB(skb).pid = get_pid(task_tgid(current)); current 1560 net/unix/af_unix.c scm->pid = get_pid(task_tgid(current)); current 1732 net/unix/af_unix.c if (signal_pending(current)) current 1879 net/unix/af_unix.c send_sig(SIGPIPE, current, 0); current 1996 net/unix/af_unix.c send_sig(SIGPIPE, current, 0); current 2165 net/unix/af_unix.c signal_pending(current) || current 2283 net/unix/af_unix.c if (signal_pending(current)) { current 107 net/unix/scm.c if (too_many_unix_fds(current)) current 1218 net/vmw_vsock/af_vsock.c if (signal_pending(current)) { current 1288 net/vmw_vsock/af_vsock.c if (signal_pending(current)) { current 1594 net/vmw_vsock/af_vsock.c if (signal_pending(current)) { current 1760 net/vmw_vsock/af_vsock.c if (signal_pending(current)) { current 741 net/vmw_vsock/virtio_transport_common.c } while (!signal_pending(current) && timeout); current 808 net/vmw_vsock/virtio_transport_common.c if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING)) current 711 net/x25/af_x25.c DECLARE_WAITQUEUE(wait, current); current 718 net/x25/af_x25.c if (signal_pending(current)) current 838 net/x25/af_x25.c DECLARE_WAITQUEUE(wait, current); current 847 net/x25/af_x25.c if (signal_pending(current)) current 1117 net/x25/af_x25.c send_sig(SIGPIPE, current, 0); current 293 net/xdp/xdp_umem.c down_read(¤t->mm->mmap_sem); current 296 net/xdp/xdp_umem.c up_read(¤t->mm->mmap_sem); current 44 samples/kprobes/kretprobe_example.c if (!current->mm) current 37 samples/trace_events/trace-events-sample.c current->cpus_ptr); current 59 scripts/kconfig/gconf.c static struct menu *current; // current node for SINGLE view current 710 scripts/kconfig/gconf.c current = current->parent; current 711 scripts/kconfig/gconf.c ptype = current->prompt ? current->prompt->type : P_UNKNOWN; current 713 scripts/kconfig/gconf.c current = current->parent; current 716 scripts/kconfig/gconf.c if (current == &rootmenu) current 731 scripts/kconfig/gconf.c current = &rootmenu; current 913 scripts/kconfig/gconf.c current = menu; current 1020 scripts/kconfig/gconf.c current = menu; current 1342 scripts/kconfig/gconf.c current = &rootmenu; current 1407 scripts/kconfig/gconf.c display_tree(current); current 155 security/apparmor/audit.c sa->u.tsk : current); current 67 security/apparmor/domain.c tracer = ptrace_parent(current); current 892 security/apparmor/domain.c ctx = task_ctx(current); current 1169 security/apparmor/domain.c struct aa_task_ctx *ctx = task_ctx(current); current 1188 security/apparmor/domain.c if (task_no_new_privs(current) && !unconfined(label) && !ctx->nnp) current 1215 security/apparmor/domain.c if (task_no_new_privs(current) && !unconfined(label) && current 1236 security/apparmor/domain.c if (task_no_new_privs(current) && !unconfined(label) && current 1319 security/apparmor/domain.c struct aa_task_ctx *ctx = task_ctx(current); current 1333 security/apparmor/domain.c if (task_no_new_privs(current) && !unconfined(label) && !ctx->nnp) current 1431 security/apparmor/domain.c if (task_no_new_privs(current) && !unconfined(label) && current 100 security/apparmor/lsm.c aa_dup_task_ctx(new, task_ctx(current)); current 406 security/apparmor/lsm.c if (current->in_execve) { current 581 security/apparmor/lsm.c struct aa_task_ctx *ctx = task_ctx(current); current 689 security/apparmor/lsm.c aa_inherit_files(bprm->cred, current->files); current 691 security/apparmor/lsm.c current->pdeath_signal = 0; current 704 security/apparmor/lsm.c aa_clear_task_ctx_trans(task_ctx(current)); current 1528 security/apparmor/lsm.c struct cred *cred = (struct cred *)current->real_cred; current 121 security/apparmor/path.c get_fs_root(current->fs, &root); current 130 security/apparmor/procattr.c , __func__, current->pid, token, count, hat); current 135 security/apparmor/procattr.c __func__, current->pid, token, count, "<NULL>"); current 161 security/apparmor/resource.c rlim = current->signal->rlim + j; current 180 security/apparmor/resource.c rlim = current->signal->rlim + j; current 44 security/apparmor/task.c struct aa_task_ctx *ctx = task_ctx(current); current 70 security/apparmor/task.c aa_clear_task_ctx_trans(task_ctx(current)); current 95 security/apparmor/task.c struct aa_task_ctx *ctx = task_ctx(current); current 117 security/apparmor/task.c struct aa_task_ctx *ctx = task_ctx(current); current 157 security/apparmor/task.c struct aa_task_ctx *ctx = task_ctx(current); current 840 security/commoncap.c !ptracer_capable(current, new->user_ns))) { current 1330 security/commoncap.c current->flags |= PF_SUPERPRIV; current 811 security/device_cgroup.c dev_cgroup = task_devcgroup(current); current 55 security/integrity/ima/ima_appraise.c security_task_getsecid(current, &secid); current 386 security/integrity/ima/ima_main.c security_task_getsecid(current, &secid); current 412 security/integrity/ima/ima_main.c security_task_getsecid(current, &secid); current 437 security/integrity/ima/ima_main.c security_task_getsecid(current, &secid); current 569 security/integrity/ima/ima_main.c security_task_getsecid(current, &secid); current 692 security/integrity/ima/ima_main.c security_task_getsecid(current, &secid); current 401 security/integrity/ima/ima_policy.c if (has_capability_noaudit(current, CAP_SETUID)) { current 40 security/integrity/integrity_audit.c task_pid_nr(current), current 42 security/integrity/integrity_audit.c from_kuid(&init_user_ns, audit_get_loginuid(current)), current 43 security/integrity/integrity_audit.c audit_get_sessionid(current)); current 46 security/integrity/integrity_audit.c audit_log_untrustedstring(ab, get_task_comm(name, current)); current 1640 security/keys/keyctl.c me = current; current 223 security/keys/keyring.c index_key->domain_tag = current->nsproxy->net_ns->key_domain; current 917 security/keys/process_keys.c if (unlikely(current->flags & PF_EXITING)) { current 25 security/keys/request_key.c struct key *key = current->cached_requested_key; current 39 security/keys/request_key.c struct task_struct *t = current; current 207 security/keys/request_key_auth.c rka->pid = current->pid; current 25 security/loadpin/loadpin.c cmdline = kstrdup_quotable_cmdline(current, GFP_KERNEL); current 32 security/loadpin/loadpin.c task_pid_nr(current), current 94 security/lockdown/lockdown.c current->comm, lockdown_reasons[what]); current 211 security/lsm_audit.c char comm[sizeof(current->comm)]; current 220 security/lsm_audit.c audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current)); current 221 security/lsm_audit.c audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm))); current 321 security/security.c lsm_early_cred((struct cred *) current->cred); current 322 security/security.c lsm_early_task(current); current 1420 security/security.c if (!(current->personality & READ_IMPLIES_EXEC)) current 210 security/selinux/hooks.c struct cred *cred = (struct cred *) current->real_cred; current 2267 security/selinux/hooks.c tracer = ptrace_parent(current); current 2513 security/selinux/hooks.c flush_unauthorized_files(bprm->cred, current->files); current 2516 security/selinux/hooks.c current->pdeath_signal = 0; current 2533 security/selinux/hooks.c task_lock(current); current 2535 security/selinux/hooks.c rlim = current->signal->rlim + i; current 2539 security/selinux/hooks.c task_unlock(current); current 2541 security/selinux/hooks.c update_rlimit_cpu(current, rlimit(RLIMIT_CPU)); current 2577 security/selinux/hooks.c spin_lock_irq(¤t->sighand->siglock); current 2578 security/selinux/hooks.c if (!fatal_signal_pending(current)) { current 2579 security/selinux/hooks.c flush_sigqueue(¤t->pending); current 2580 security/selinux/hooks.c flush_sigqueue(¤t->signal->shared_pending); current 2581 security/selinux/hooks.c flush_signal_handlers(current, 1); current 2582 security/selinux/hooks.c sigemptyset(¤t->blocked); current 2585 security/selinux/hooks.c spin_unlock_irq(¤t->sighand->siglock); current 2591 security/selinux/hooks.c __wake_up_parent(current, current->real_parent); current 5557 security/selinux/hooks.c task_pid_nr(current), current->comm); current 6305 security/selinux/hooks.c if (current != p) { current 173 security/selinux/selinuxfs.c from_kuid(&init_user_ns, audit_get_loginuid(current)), current 174 security/selinux/selinuxfs.c audit_get_sessionid(current), current 309 security/selinux/selinuxfs.c from_kuid(&init_user_ns, audit_get_loginuid(current)), current 310 security/selinux/selinuxfs.c audit_get_sessionid(current), 0, 1); current 579 security/selinux/selinuxfs.c from_kuid(&init_user_ns, audit_get_loginuid(current)), current 580 security/selinux/selinuxfs.c audit_get_sessionid(current)); current 2877 security/selinux/ss/services.c from_kuid(&init_user_ns, audit_get_loginuid(current)), current 2878 security/selinux/ss/services.c audit_get_sessionid(current)); current 674 security/smack/smack_access.c if (unlikely(current->flags & PF_KTHREAD)) current 152 security/smack/smack_lsm.c acc, current->comm, note); current 174 security/smack/smack_lsm.c current->comm, otp->comm); current 190 security/smack/smack_lsm.c inode->i_sb->s_id, inode->i_ino, current->comm); current 204 security/smack/smack_lsm.c inode->i_sb->s_id, inode->i_ino, current->comm); current 222 security/smack/smack_lsm.c inode->i_sb->s_id, inode->i_ino, current->comm); current 233 security/smack/smack_lsm.c current->comm); current 252 security/smack/smack_lsm.c inode->i_sb->s_id, inode->i_ino, current->comm); current 263 security/smack/smack_lsm.c current->comm); current 486 security/smack/smack_lsm.c return smk_ptrace_rule_check(current, skp, mode, __func__); current 928 security/smack/smack_lsm.c tracer = ptrace_parent(current); current 2255 security/smack/smack_lsm.c if (unlikely(current->flags & PF_KTHREAD)) { current 2756 security/smack/smack_lsm.c if (unlikely(current->flags & PF_KTHREAD)) { current 4757 security/smack/smack_lsm.c struct cred *cred = (struct cred *) current->cred; current 186 security/smack/smackfs.c nap->loginuid = audit_get_loginuid(current); current 187 security/smack/smackfs.c nap->sessionid = audit_get_sessionid(current); current 153 security/tomoyo/audit.c const pid_t gpid = task_pid_nr(current); current 941 security/tomoyo/common.c const struct task_struct *task = current; current 965 security/tomoyo/common.c const pid_t pid = current->pid; current 1139 security/tomoyo/common.h pid = task_tgid_vnr(rcu_dereference(current->real_parent)); current 1154 security/tomoyo/common.h return task_tgid_vnr(current); current 872 security/tomoyo/domain.c struct tomoyo_task *s = tomoyo_task(current); current 917 security/tomoyo/domain.c if (get_user_pages_remote(current, bprm->mm, pos, 1, current 21 security/tomoyo/memory.c const pid_t pid = current->pid; current 166 security/tomoyo/realpath.c task_tgid_nr_ns(current, sb->s_fs_info)) { current 73 security/tomoyo/securityfs_if.c struct tomoyo_task *s = tomoyo_task(current); current 18 security/tomoyo/tomoyo.c struct tomoyo_task *s = tomoyo_task(current); current 20 security/tomoyo/tomoyo.c if (s->old_domain_info && !current->in_execve) { current 40 security/tomoyo/tomoyo.c struct tomoyo_task *s = tomoyo_task(current); current 42 security/tomoyo/tomoyo.c if (s->old_domain_info && !current->in_execve) { current 58 security/tomoyo/tomoyo.c struct tomoyo_task *s = tomoyo_task(current); current 99 security/tomoyo/tomoyo.c struct tomoyo_task *s = tomoyo_task(current); current 317 security/tomoyo/tomoyo.c if (current->in_execve) current 504 security/tomoyo/tomoyo.c struct tomoyo_task *old = tomoyo_task(current); current 581 security/tomoyo/tomoyo.c struct tomoyo_task *s = tomoyo_task(current); current 948 security/tomoyo/util.c struct mm_struct *mm = current->mm; current 82 security/yama/yama_lsm.c if (current->flags & PF_KTHREAD) { current 102 security/yama/yama_lsm.c if (task_work_add(current, &info->work, true) == 0) current 225 security/yama/yama_lsm.c struct task_struct *myself = current; current 369 security/yama/yama_lsm.c if (!rc && !task_is_descendant(current, child) && current 370 security/yama/yama_lsm.c !ptracer_exception_found(current, child) && current 389 security/yama/yama_lsm.c report_access("attach", child, current); current 416 security/yama/yama_lsm.c task_lock(current); current 417 security/yama/yama_lsm.c report_access("traceme", current, parent); current 418 security/yama/yama_lsm.c task_unlock(current); current 71 sound/core/control.c ctl->pid = get_pid(task_pid(current)); current 1609 sound/core/control.c init_waitqueue_entry(&wait, current); current 1617 sound/core/control.c if (signal_pending(current)) current 1768 sound/core/control.c if (kctl->pid == task_pid(current)) { current 93 sound/core/hwdep.c init_waitqueue_entry(&wait, current); current 123 sound/core/hwdep.c if (signal_pending(current)) { current 1002 sound/core/init.c init_waitqueue_entry(&wait, current); current 1435 sound/core/oss/pcm_oss.c if (signal_pending(current)) { current 1530 sound/core/oss/pcm_oss.c if (signal_pending(current)) { current 1587 sound/core/oss/pcm_oss.c init_waitqueue_entry(&wait, current); current 1611 sound/core/oss/pcm_oss.c if (signal_pending(current)) { current 2470 sound/core/oss/pcm_oss.c if (snd_task_name(current, task_name, sizeof(task_name)) < 0) { current 2486 sound/core/oss/pcm_oss.c init_waitqueue_entry(&wait, current); current 2509 sound/core/oss/pcm_oss.c if (signal_pending(current)) { current 973 sound/core/pcm.c substream->pid = get_pid(task_pid(current)); current 1825 sound/core/pcm_lib.c init_waitqueue_entry(&wait, current); current 1848 sound/core/pcm_lib.c if (signal_pending(current)) { current 1870 sound/core/pcm_native.c if (signal_pending(current)) { current 1889 sound/core/pcm_native.c init_waitqueue_entry(&wait, current); current 2570 sound/core/pcm_native.c init_waitqueue_entry(&wait, current); current 2592 sound/core/pcm_native.c if (signal_pending(current)) { current 202 sound/core/rawmidi.c if (signal_pending(current)) current 285 sound/core/rawmidi.c substream->pid = get_pid(task_pid(current)); current 417 sound/core/rawmidi.c init_waitqueue_entry(&wait, current); current 439 sound/core/rawmidi.c if (signal_pending(current)) { current 1034 sound/core/rawmidi.c init_waitqueue_entry(&wait, current); current 1042 sound/core/rawmidi.c if (signal_pending(current)) current 1363 sound/core/rawmidi.c init_waitqueue_entry(&wait, current); current 1371 sound/core/rawmidi.c if (signal_pending(current)) current 1393 sound/core/rawmidi.c init_waitqueue_entry(&wait, current); current 1399 sound/core/rawmidi.c if (signal_pending(current)) current 88 sound/core/seq/oss/seq_oss_ioctl.c if (signal_pending(current)) current 50 sound/core/seq/oss/seq_oss_rw.c if (signal_pending(current)) current 110 sound/core/seq/oss/seq_oss_writeq.c if (signal_pending(current)) current 369 sound/core/seq/seq_clientmgr.c client->data.user.owner = get_pid(task_pid(current)); current 172 sound/core/seq/seq_fifo.c init_waitqueue_entry(&wait, current); current 186 sound/core/seq/seq_fifo.c if (signal_pending(current)) { current 222 sound/core/seq/seq_memory.c init_waitqueue_entry(&wait, current); current 242 sound/core/seq/seq_memory.c if (signal_pending(current)) { current 1735 sound/core/timer.c sprintf(str, "application %i", current->pid); current 1738 sound/core/timer.c err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid); current 2078 sound/core/timer.c init_waitqueue_entry(&wait, current); current 2093 sound/core/timer.c if (signal_pending(current)) { current 33 sound/firewire/bebob/bebob_hwdep.c if (signal_pending(current)) current 25 sound/firewire/dice/dice-hwdep.c if (signal_pending(current)) current 33 sound/firewire/digi00x/digi00x-hwdep.c if (signal_pending(current)) current 32 sound/firewire/fireface/ff-hwdep.c if (signal_pending(current)) current 139 sound/firewire/fireworks/fireworks_hwdep.c if (signal_pending(current)) current 33 sound/firewire/motu/motu-hwdep.c if (signal_pending(current)) current 32 sound/firewire/oxfw/oxfw-hwdep.c if (signal_pending(current)) current 109 sound/firewire/tascam/tascam-hwdep.c if (signal_pending(current)) current 334 sound/isa/gus/gus_pcm.c if (signal_pending(current)) current 210 sound/isa/msnd/msnd.c if (!signal_pending(current)) current 126 sound/isa/sb/emu8000.c if (signal_pending(current)) current 138 sound/isa/sb/emu8000.c if (signal_pending(current)) current 437 sound/isa/sb/emu8000.c if (signal_pending(current)) current 102 sound/isa/sb/emu8000_patch.c if (signal_pending(current)) current 109 sound/isa/sb/emu8000_pcm.c if (signal_pending(current)) current 408 sound/isa/sb/emu8000_pcm.c if (signal_pending(current))\ current 272 sound/isa/wavefront/wavefront_synth.c return signal_pending(current); current 1783 sound/isa/wavefront/wavefront_synth.c init_waitqueue_entry(&wait, current); current 636 sound/oss/dmasound/dmasound_core.c if (signal_pending(current)) { current 859 sound/oss/dmasound/dmasound_core.c if (signal_pending(current)) { current 1391 sound/pci/korg1212/korg1212.c korg1212->playback_pid = current->pid; current 1422 sound/pci/korg1212/korg1212.c korg1212->capture_pid = current->pid; current 232 sound/pci/mixart/mixart_core.c init_waitqueue_entry(&wait, current); current 284 sound/pci/mixart/mixart_core.c init_waitqueue_entry(&wait, current); current 4450 sound/pci/rme9652/hdsp.c hdsp->playback_pid = current->pid; current 4527 sound/pci/rme9652/hdsp.c hdsp->capture_pid = current->pid; current 6067 sound/pci/rme9652/hdspm.c hdspm->playback_pid = current->pid; current 6073 sound/pci/rme9652/hdspm.c hdspm->capture_pid = current->pid; current 2299 sound/pci/rme9652/rme9652.c rme9652->playback_pid = current->pid; current 2359 sound/pci/rme9652/rme9652.c rme9652->capture_pid = current->pid; current 776 sound/pci/ymfpci/ymfpci_main.c init_waitqueue_entry(&wait, current); current 56 sound/soc/sof/trace.c init_waitqueue_entry(&wait, current); current 60 sound/soc/sof/trace.c if (!signal_pending(current)) { current 31 tools/iio/iio_utils.c char *current; current 43 tools/iio/iio_utils.c current = strdup(full_name + strlen(prefix) + 1); current 44 tools/iio/iio_utils.c if (!current) current 47 tools/iio/iio_utils.c working = strtok(current, "_\0"); current 49 tools/iio/iio_utils.c free(current); current 66 tools/iio/iio_utils.c free(current); current 317 tools/iio/iio_utils.c struct iio_channel_info *current; current 384 tools/iio/iio_utils.c current = &(*ci_array)[count++]; current 423 tools/iio/iio_utils.c current->scale = 1.0; current 424 tools/iio/iio_utils.c current->offset = 0; current 425 tools/iio/iio_utils.c current->name = strndup(ent->d_name, current 428 tools/iio/iio_utils.c if (!current->name) { current 436 tools/iio/iio_utils.c ret = iioutils_break_up_name(current->name, current 437 tools/iio/iio_utils.c ¤t->generic_name); current 440 tools/iio/iio_utils.c free(current->name); current 448 tools/iio/iio_utils.c current->name); current 465 tools/iio/iio_utils.c if (fscanf(sysfsfp, "%u", ¤t->index) != 1) { current 482 tools/iio/iio_utils.c ret = iioutils_get_param_float(¤t->scale, current 485 tools/iio/iio_utils.c current->name, current 486 tools/iio/iio_utils.c current->generic_name); current 490 tools/iio/iio_utils.c ret = iioutils_get_param_float(¤t->offset, current 493 tools/iio/iio_utils.c current->name, current 494 tools/iio/iio_utils.c current->generic_name); current 498 tools/iio/iio_utils.c ret = iioutils_get_type(¤t->is_signed, current 499 tools/iio/iio_utils.c ¤t->bytes, current 500 tools/iio/iio_utils.c ¤t->bits_used, current 501 tools/iio/iio_utils.c ¤t->shift, current 502 tools/iio/iio_utils.c ¤t->mask, current 503 tools/iio/iio_utils.c ¤t->be, current 505 tools/iio/iio_utils.c current->name, current 506 tools/iio/iio_utils.c current->generic_name); current 5657 tools/lib/traceevent/event-parse.c char *current = format; current 5668 tools/lib/traceevent/event-parse.c while (*current) { current 5669 tools/lib/traceevent/event-parse.c current = strchr(str, '%'); current 5670 tools/lib/traceevent/event-parse.c if (!current) { current 5675 tools/lib/traceevent/event-parse.c offset = tep_print_event_param_type(current, &type); current 5676 tools/lib/traceevent/event-parse.c *current = '\0'; current 5678 tools/lib/traceevent/event-parse.c current += offset; current 5693 tools/lib/traceevent/event-parse.c str = current; current 102 tools/perf/builtin-timechart.c struct per_pidcomm *current; current 224 tools/perf/builtin-timechart.c p->current = c; current 229 tools/perf/builtin-timechart.c p->current = c; current 237 tools/perf/builtin-timechart.c p->current = c; current 248 tools/perf/builtin-timechart.c if (pp->current && pp->current->comm && !p->current) current 249 tools/perf/builtin-timechart.c pid_set_comm(tchart, pid, pp->current->comm); current 252 tools/perf/builtin-timechart.c if (p->current && !p->current->start_time) { current 253 tools/perf/builtin-timechart.c p->current->start_time = timestamp; current 254 tools/perf/builtin-timechart.c p->current->state_since = timestamp; current 263 tools/perf/builtin-timechart.c if (p->current) current 264 tools/perf/builtin-timechart.c p->current->end_time = timestamp; current 276 tools/perf/builtin-timechart.c c = p->current; current 280 tools/perf/builtin-timechart.c p->current = c; current 427 tools/perf/builtin-timechart.c if (p && p->current && p->current->state == TYPE_NONE) { current 428 tools/perf/builtin-timechart.c p->current->state_since = timestamp; current 429 tools/perf/builtin-timechart.c p->current->state = TYPE_WAITING; current 431 tools/perf/builtin-timechart.c if (p && p->current && p->current->state == TYPE_BLOCKED) { current 432 tools/perf/builtin-timechart.c pid_put_sample(tchart, p->pid, p->current->state, cpu, current 433 tools/perf/builtin-timechart.c p->current->state_since, timestamp, NULL); current 434 tools/perf/builtin-timechart.c p->current->state_since = timestamp; current 435 tools/perf/builtin-timechart.c p->current->state = TYPE_WAITING; current 449 tools/perf/builtin-timechart.c if (prev_p->current && prev_p->current->state != TYPE_NONE) current 451 tools/perf/builtin-timechart.c prev_p->current->state_since, timestamp, current 453 tools/perf/builtin-timechart.c if (p && p->current) { current 454 tools/perf/builtin-timechart.c if (p->current->state != TYPE_NONE) current 455 tools/perf/builtin-timechart.c pid_put_sample(tchart, next_pid, p->current->state, cpu, current 456 tools/perf/builtin-timechart.c p->current->state_since, timestamp, current 459 tools/perf/builtin-timechart.c p->current->state_since = timestamp; current 460 tools/perf/builtin-timechart.c p->current->state = TYPE_RUNNING; current 463 tools/perf/builtin-timechart.c if (prev_p->current) { current 464 tools/perf/builtin-timechart.c prev_p->current->state = TYPE_NONE; current 465 tools/perf/builtin-timechart.c prev_p->current->state_since = timestamp; current 467 tools/perf/builtin-timechart.c prev_p->current->state = TYPE_BLOCKED; current 469 tools/perf/builtin-timechart.c prev_p->current->state = TYPE_WAITING; current 722 tools/perf/builtin-timechart.c struct per_pidcomm *c = p->current; current 730 tools/perf/builtin-timechart.c p->current = c; current 767 tools/perf/builtin-timechart.c struct per_pidcomm *c = p->current; current 112 tools/perf/builtin-trace.c struct thread *current; current 1879 tools/perf/builtin-trace.c if (trace->failure_only || trace->current == NULL) current 1882 tools/perf/builtin-trace.c ttrace = thread__priv(trace->current); current 1887 tools/perf/builtin-trace.c printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); current 2013 tools/perf/builtin-trace.c if (trace->current != thread) { current 2014 tools/perf/builtin-trace.c thread__put(trace->current); current 2015 tools/perf/builtin-trace.c trace->current = thread__get(thread); current 3493 tools/perf/builtin-trace.c thread__zput(trace->current); current 21 tools/perf/ui/browser.c double percent, bool current) current 23 tools/perf/ui/browser.c if (current && (!browser->use_navkeypressed || browser->navkeypressed)) current 41 tools/perf/ui/browser.c double percent, bool current) current 43 tools/perf/ui/browser.c int color = ui_browser__percent_color(browser, percent, current); current 39 tools/perf/ui/browser.h double percent, bool current); current 53 tools/perf/ui/browsers/annotate.c static int ui_browser__jumps_percent_color(struct ui_browser *browser, int nr, bool current) current 57 tools/perf/ui/browsers/annotate.c if (current && (!browser->use_navkeypressed || browser->navkeypressed)) current 66 tools/perf/ui/browsers/annotate.c static int ui_browser__set_jumps_percent_color(void *browser, int nr, bool current) current 68 tools/perf/ui/browsers/annotate.c int color = ui_browser__jumps_percent_color(browser, nr, current); current 82 tools/perf/ui/browsers/annotate.c static void annotate_browser__set_percent_color(void *browser, double percent, bool current) current 84 tools/perf/ui/browsers/annotate.c ui_browser__set_percent_color(browser, percent, current); current 2366 tools/perf/util/annotate.c bool current __maybe_unused) current 2371 tools/perf/util/annotate.c int nr __maybe_unused, bool current __maybe_unused) current 2812 tools/perf/util/annotate.c void (*obj__set_percent_color)(void *obj, double percent, bool current), current 2813 tools/perf/util/annotate.c int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current), current 207 tools/perf/util/annotate.h void (*set_percent_color)(void *obj, double percent, bool current); current 208 tools/perf/util/annotate.h int (*set_jumps_percent_color)(void *obj, int nr, bool current); current 216 tools/perf/util/db-export.c struct call_path *current = &dbe->cpr->call_path; current 263 tools/perf/util/db-export.c current = call_path__findnew(dbe->cpr, current, current 273 tools/perf/util/db-export.c if (current == &dbe->cpr->call_path) { current 278 tools/perf/util/db-export.c return current; current 265 tools/power/cpupower/lib/cpufreq.c struct cpufreq_available_governors *current = NULL; current 280 tools/power/cpupower/lib/cpufreq.c if (current) { current 281 tools/power/cpupower/lib/cpufreq.c current->next = malloc(sizeof(*current)); current 282 tools/power/cpupower/lib/cpufreq.c if (!current->next) current 284 tools/power/cpupower/lib/cpufreq.c current = current->next; current 289 tools/power/cpupower/lib/cpufreq.c current = first; current 291 tools/power/cpupower/lib/cpufreq.c current->first = first; current 292 tools/power/cpupower/lib/cpufreq.c current->next = NULL; current 294 tools/power/cpupower/lib/cpufreq.c current->governor = malloc(i - pos + 1); current 295 tools/power/cpupower/lib/cpufreq.c if (!current->governor) current 298 tools/power/cpupower/lib/cpufreq.c memcpy(current->governor, linebuf + pos, i - pos); current 299 tools/power/cpupower/lib/cpufreq.c current->governor[i - pos] = '\0'; current 308 tools/power/cpupower/lib/cpufreq.c current = first->next; current 312 tools/power/cpupower/lib/cpufreq.c first = current; current 339 tools/power/cpupower/lib/cpufreq.c struct cpufreq_available_frequencies *current = NULL; current 357 tools/power/cpupower/lib/cpufreq.c if (current) { current 358 tools/power/cpupower/lib/cpufreq.c current->next = malloc(sizeof(*current)); current 359 tools/power/cpupower/lib/cpufreq.c if (!current->next) current 361 tools/power/cpupower/lib/cpufreq.c current = current->next; current 366 tools/power/cpupower/lib/cpufreq.c current = first; current 368 tools/power/cpupower/lib/cpufreq.c current->first = first; current 369 tools/power/cpupower/lib/cpufreq.c current->next = NULL; current 373 tools/power/cpupower/lib/cpufreq.c if (sscanf(one_value, "%lu", ¤t->frequency) != 1) current 384 tools/power/cpupower/lib/cpufreq.c current = first->next; current 386 tools/power/cpupower/lib/cpufreq.c first = current; current 395 tools/power/cpupower/lib/cpufreq.c struct cpufreq_available_frequencies *current = NULL; current 413 tools/power/cpupower/lib/cpufreq.c if (current) { current 414 tools/power/cpupower/lib/cpufreq.c current->next = malloc(sizeof(*current)); current 415 tools/power/cpupower/lib/cpufreq.c if (!current->next) current 417 tools/power/cpupower/lib/cpufreq.c current = current->next; current 422 tools/power/cpupower/lib/cpufreq.c current = first; current 424 tools/power/cpupower/lib/cpufreq.c current->first = first; current 425 tools/power/cpupower/lib/cpufreq.c current->next = NULL; current 429 tools/power/cpupower/lib/cpufreq.c if (sscanf(one_value, "%lu", ¤t->frequency) != 1) current 440 tools/power/cpupower/lib/cpufreq.c current = first->next; current 442 tools/power/cpupower/lib/cpufreq.c first = current; current 471 tools/power/cpupower/lib/cpufreq.c struct cpufreq_affected_cpus *current = NULL; current 488 tools/power/cpupower/lib/cpufreq.c if (current) { current 489 tools/power/cpupower/lib/cpufreq.c current->next = malloc(sizeof(*current)); current 490 tools/power/cpupower/lib/cpufreq.c if (!current->next) current 492 tools/power/cpupower/lib/cpufreq.c current = current->next; current 497 tools/power/cpupower/lib/cpufreq.c current = first; current 499 tools/power/cpupower/lib/cpufreq.c current->first = first; current 500 tools/power/cpupower/lib/cpufreq.c current->next = NULL; current 505 tools/power/cpupower/lib/cpufreq.c if (sscanf(one_value, "%u", ¤t->cpu) != 1) current 516 tools/power/cpupower/lib/cpufreq.c current = first->next; current 518 tools/power/cpupower/lib/cpufreq.c first = current; current 702 tools/power/cpupower/lib/cpufreq.c struct cpufreq_stats *current = NULL; current 721 tools/power/cpupower/lib/cpufreq.c if (current) { current 722 tools/power/cpupower/lib/cpufreq.c current->next = malloc(sizeof(*current)); current 723 tools/power/cpupower/lib/cpufreq.c if (!current->next) current 725 tools/power/cpupower/lib/cpufreq.c current = current->next; current 730 tools/power/cpupower/lib/cpufreq.c current = first; current 732 tools/power/cpupower/lib/cpufreq.c current->first = first; current 733 tools/power/cpupower/lib/cpufreq.c current->next = NULL; current 738 tools/power/cpupower/lib/cpufreq.c ¤t->frequency, current 739 tools/power/cpupower/lib/cpufreq.c ¤t->time_in_state) != 2) current 742 tools/power/cpupower/lib/cpufreq.c *total_time = *total_time + current->time_in_state; current 751 tools/power/cpupower/lib/cpufreq.c current = first->next; current 753 tools/power/cpupower/lib/cpufreq.c first = current; current 462 tools/spi/spidev_test.c struct timespec current; current 466 tools/spi/spidev_test.c clock_gettime(CLOCK_MONOTONIC, ¤t); current 467 tools/spi/spidev_test.c if (current.tv_sec - last_stat.tv_sec > interval) { current 469 tools/spi/spidev_test.c last_stat = current; current 93 tools/testing/selftests/cgroup/test_memcontrol.c long anon, current; current 100 tools/testing/selftests/cgroup/test_memcontrol.c current = cg_read_long(cgroup, "memory.current"); current 101 tools/testing/selftests/cgroup/test_memcontrol.c if (current < size) current 104 tools/testing/selftests/cgroup/test_memcontrol.c if (!values_close(size, current, 3)) current 111 tools/testing/selftests/cgroup/test_memcontrol.c if (!values_close(anon, current, 3)) current 124 tools/testing/selftests/cgroup/test_memcontrol.c long current, file; current 134 tools/testing/selftests/cgroup/test_memcontrol.c current = cg_read_long(cgroup, "memory.current"); current 135 tools/testing/selftests/cgroup/test_memcontrol.c if (current < size) current 142 tools/testing/selftests/cgroup/test_memcontrol.c if (!values_close(file, current, 10)) current 160 tools/testing/selftests/cgroup/test_memcontrol.c long current; current 170 tools/testing/selftests/cgroup/test_memcontrol.c current = cg_read_long(memcg, "memory.current"); current 171 tools/testing/selftests/cgroup/test_memcontrol.c if (current != 0) current 560 tools/testing/selftests/cgroup/test_memcontrol.c long current; current 570 tools/testing/selftests/cgroup/test_memcontrol.c current = cg_read_long(cgroup, "memory.current"); current 571 tools/testing/selftests/cgroup/test_memcontrol.c if (current <= MB(29) || current > MB(30)) current 640 tools/testing/selftests/cgroup/test_memcontrol.c long current, max; current 665 tools/testing/selftests/cgroup/test_memcontrol.c current = cg_read_long(memcg, "memory.current"); current 666 tools/testing/selftests/cgroup/test_memcontrol.c if (current > MB(30) || !current) current 902 tools/testing/selftests/cgroup/test_memcontrol.c long current, sock; current 907 tools/testing/selftests/cgroup/test_memcontrol.c current = cg_read_long(cgroup, "memory.current"); current 910 tools/testing/selftests/cgroup/test_memcontrol.c if (current < 0 || sock < 0) current 913 tools/testing/selftests/cgroup/test_memcontrol.c if (current < sock) current 916 tools/testing/selftests/cgroup/test_memcontrol.c if (values_close(current, sock, 10)) { current 130 tools/testing/selftests/gpio/gpio-mockup-chardev.c struct gpiochip_info *current; current 141 tools/testing/selftests/gpio/gpio-mockup-chardev.c current = cinfo; current 163 tools/testing/selftests/gpio/gpio-mockup-chardev.c *ret = ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, current); current 169 tools/testing/selftests/gpio/gpio-mockup-chardev.c if (strcmp(current->label, gpiochip_name) == 0 current 170 tools/testing/selftests/gpio/gpio-mockup-chardev.c || check_prefix(current->label, gpiochip_name)) { current 172 tools/testing/selftests/gpio/gpio-mockup-chardev.c current++; current 275 tools/testing/selftests/gpio/gpio-mockup-chardev.c struct gpiochip_info *current; current 302 tools/testing/selftests/gpio/gpio-mockup-chardev.c current = cinfo; current 304 tools/testing/selftests/gpio/gpio-mockup-chardev.c gpio_pin_tests(current, 0); current 305 tools/testing/selftests/gpio/gpio-mockup-chardev.c gpio_pin_tests(current, current->lines - 1); current 306 tools/testing/selftests/gpio/gpio-mockup-chardev.c gpio_pin_tests(current, random() % current->lines); current 307 tools/testing/selftests/gpio/gpio-mockup-chardev.c current++; current 76 tools/testing/selftests/powerpc/pmu/count_instructions.c u64 current, overhead; current 84 tools/testing/selftests/powerpc/pmu/count_instructions.c current = events[0].result.value; current 85 tools/testing/selftests/powerpc/pmu/count_instructions.c if (current < overhead) { current 86 tools/testing/selftests/powerpc/pmu/count_instructions.c printf("Replacing overhead %llu with %llu\n", overhead, current); current 87 tools/testing/selftests/powerpc/pmu/count_instructions.c overhead = current; current 72 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c uint64_t current, overhead; current 80 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c current = event->result.value; current 81 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c if (current < overhead) { current 82 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c printf("Replacing overhead %lu with %lu\n", overhead, current); current 83 tools/testing/selftests/powerpc/pmu/ebb/instruction_count_test.c overhead = current; current 193 tools/testing/selftests/powerpc/pmu/lib.c long current; current 211 tools/testing/selftests/powerpc/pmu/lib.c current = strtol(buf, &end, 10); current 218 tools/testing/selftests/powerpc/pmu/lib.c if (current >= level) current 24 tools/testing/selftests/rtc/setdate.c struct rtc_time new, current; current 65 tools/testing/selftests/rtc/setdate.c retval = ioctl(fd, RTC_RD_TIME, ¤t); current 72 tools/testing/selftests/rtc/setdate.c current.tm_mday, current.tm_mon + 1, current.tm_year + 1900, current 73 tools/testing/selftests/rtc/setdate.c current.tm_hour, current.tm_min, current.tm_sec); current 735 virt/kvm/arm/arm.c if (signal_pending(current)) { current 944 virt/kvm/arm/mmu.c struct vm_area_struct *vma = find_vma(current->mm, hva); current 978 virt/kvm/arm/mmu.c down_read(¤t->mm->mmap_sem); current 986 virt/kvm/arm/mmu.c up_read(¤t->mm->mmap_sem); current 1609 virt/kvm/arm/mmu.c send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current); current 1696 virt/kvm/arm/mmu.c down_read(¤t->mm->mmap_sem); current 1697 virt/kvm/arm/mmu.c vma = find_vma_intersection(current->mm, hva, hva + 1); current 1700 virt/kvm/arm/mmu.c up_read(¤t->mm->mmap_sem); current 1722 virt/kvm/arm/mmu.c up_read(¤t->mm->mmap_sem); current 2298 virt/kvm/arm/mmu.c down_read(¤t->mm->mmap_sem); current 2312 virt/kvm/arm/mmu.c struct vm_area_struct *vma = find_vma(current->mm, hva); current 2366 virt/kvm/arm/mmu.c up_read(¤t->mm->mmap_sem); current 615 virt/kvm/arm/pmu.c event = perf_event_create_kernel_counter(&attr, -1, current, current 625 virt/kvm/arm/pmu.c event = perf_event_create_kernel_counter(&attr, -1, current, current 191 virt/kvm/async_pf.c work->mm = current->mm; current 556 virt/kvm/kvm_main.c return mmu_notifier_register(&kvm->mmu_notifier, current->mm); current 644 virt/kvm/kvm_main.c snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd); current 695 virt/kvm/kvm_main.c mmgrab(current->mm); current 696 virt/kvm/kvm_main.c kvm->mm = current->mm; current 759 virt/kvm/kvm_main.c mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); current 776 virt/kvm/kvm_main.c mmdrop(current->mm); current 1414 virt/kvm/kvm_main.c down_read(¤t->mm->mmap_sem); current 1415 virt/kvm/kvm_main.c vma = find_vma(current->mm, addr); current 1422 virt/kvm/kvm_main.c up_read(¤t->mm->mmap_sem); current 1609 virt/kvm/kvm_main.c r = fixup_user_fault(current, current->mm, addr, current 1677 virt/kvm/kvm_main.c down_read(¤t->mm->mmap_sem); current 1685 virt/kvm/kvm_main.c vma = find_vma_intersection(current->mm, addr, addr + 1); current 1701 virt/kvm/kvm_main.c up_read(¤t->mm->mmap_sem); current 2397 virt/kvm/kvm_main.c sigprocmask(SIG_SETMASK, &vcpu->sigset, ¤t->real_blocked); current 2405 virt/kvm/kvm_main.c sigprocmask(SIG_SETMASK, ¤t->real_blocked, NULL); current 2406 virt/kvm/kvm_main.c sigemptyset(¤t->real_blocked); current 2457 virt/kvm/kvm_main.c if (signal_pending(current)) current 2882 virt/kvm/kvm_main.c if (vcpu->kvm->mm != current->mm) current 2905 virt/kvm/kvm_main.c if (unlikely(oldpid != task_pid(current))) { current 2913 virt/kvm/kvm_main.c newpid = get_task_pid(current, PIDTYPE_PID); current 3089 virt/kvm/kvm_main.c if (vcpu->kvm->mm != current->mm) current 3154 virt/kvm/kvm_main.c if (dev->kvm->mm != current->mm) current 3355 virt/kvm/kvm_main.c if (kvm->mm != current->mm) current 3553 virt/kvm/kvm_main.c if (kvm->mm != current->mm) current 4307 virt/kvm/kvm_main.c kvm->userspace_pid = task_pid_nr(current); current 4389 virt/kvm/kvm_main.c if (current->state == TASK_RUNNING) { current 4543 virt/kvm/kvm_main.c err = kthread_park(current); current 4549 virt/kvm/kvm_main.c err = cgroup_attach_task_all(init_context->parent, current); current 4556 virt/kvm/kvm_main.c set_user_nice(current, task_nice(init_context->parent)); current 4584 virt/kvm/kvm_main.c init_context.parent = current; current 4590 virt/kvm/kvm_main.c "%s-%d", name, task_pid_nr(current));