/linux-4.4.14/arch/score/include/asm/ |
H A D | current.h | 4 #include <asm-generic/current.h>
|
/linux-4.4.14/arch/sparc/kernel/ |
H A D | sigutil_32.c | 18 if (test_tsk_thread_flag(current, TIF_USEDFPU)) { save_fpu_state() 20 fpsave(&current->thread.float_regs[0], &current->thread.fsr, save_fpu_state() 21 &current->thread.fpqueue[0], &current->thread.fpqdepth); save_fpu_state() 23 clear_tsk_thread_flag(current, TIF_USEDFPU); save_fpu_state() 26 if (current == last_task_used_math) { save_fpu_state() 28 fpsave(&current->thread.float_regs[0], &current->thread.fsr, save_fpu_state() 29 &current->thread.fpqueue[0], &current->thread.fpqdepth); save_fpu_state() 35 &current->thread.float_regs[0], save_fpu_state() 37 err |= __put_user(current->thread.fsr, &fpu->si_fsr); save_fpu_state() 38 err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); save_fpu_state() 39 if (current->thread.fpqdepth != 0) save_fpu_state() 41 &current->thread.fpqueue[0], save_fpu_state() 56 if (test_tsk_thread_flag(current, TIF_USEDFPU)) restore_fpu_state() 59 if (current == last_task_used_math) { restore_fpu_state() 65 clear_tsk_thread_flag(current, TIF_USEDFPU); restore_fpu_state() 70 err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], restore_fpu_state() 72 err |= __get_user(current->thread.fsr, &fpu->si_fsr); restore_fpu_state() 73 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); restore_fpu_state() 74 if (current->thread.fpqdepth != 0) restore_fpu_state() 75 err |= __copy_from_user(&current->thread.fpqueue[0], restore_fpu_state()
|
H A D | process_32.c | 155 tsk = current; show_stack() 157 if (tsk == current && !_ksp) show_stack() 185 * Free current thread data structures etc.. 190 if(last_task_used_math == current) { 196 fpsave(&current->thread.float_regs[0], &current->thread.fsr, 197 &current->thread.fpqueue[0], &current->thread.fpqdepth); 211 if(last_task_used_math == current) { 217 fpsave(&current->thread.float_regs[0], &current->thread.fsr, 218 &current->thread.fpqueue[0], &current->thread.fpqdepth); 227 if (current->thread.flags & SPARC_FLAG_KTHREAD) { 228 current->thread.flags &= ~SPARC_FLAG_KTHREAD; 232 current->thread.kregs = (struct pt_regs *) 233 (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ)); 316 if(last_task_used_math == current) { 364 ti->kpsr = current->thread.fork_kpsr | PSR_PIL; 365 ti->kwim = current->thread.fork_kwim; 404 childregs->u_regs[UREG_I0] = current->pid; 429 fpsave(&current->thread.float_regs[0], &current->thread.fsr, dump_fpu() 430 &current->thread.fpqueue[0], &current->thread.fpqdepth); dump_fpu() 437 if (current == last_task_used_math) { dump_fpu() 439 fpsave(&current->thread.float_regs[0], &current->thread.fsr, dump_fpu() 440 &current->thread.fpqueue[0], &current->thread.fpqdepth); dump_fpu() 448 &current->thread.float_regs[0], dump_fpu() 450 fpregs->pr_fsr = current->thread.fsr; dump_fpu() 451 fpregs->pr_qcnt = current->thread.fpqdepth; dump_fpu() 456 &current->thread.fpqueue[0], dump_fpu() 473 if (!task || task == current || get_wchan()
|
H A D | traps_32.c | 59 printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); die_if_kernel() 109 force_sig_info(SIGILL, &info, current); do_hw_interrupt() 129 send_sig_info(SIGILL, &info, current); do_illegal_instruction() 144 send_sig_info(SIGILL, &info, current); do_priv_instruction() 170 send_sig_info(SIGBUS, &info, current); do_memaccess_unaligned() 190 if(last_task_used_math == current) do_fpd_trap() 198 last_task_used_math = current; do_fpd_trap() 200 fpload(&current->thread.float_regs[0], &current->thread.fsr); do_fpd_trap() 211 fpload(&current->thread.float_regs[0], &current->thread.fsr); do_fpd_trap() 232 struct task_struct *fpt = current; 279 fpload(&current->thread.float_regs[0], &current->thread.fsr); 341 send_sig_info(SIGEMT, &info, current); handle_tag_overflow() 370 force_sig_info(SIGBUS, &info, current); handle_reg_access() 383 send_sig_info(SIGILL, &info, current); handle_cp_disabled() 400 send_sig_info(SIGILL, &info, current); handle_cp_exception() 413 send_sig_info(SIGFPE, &info, current); handle_hw_divzero() 452 current->active_mm = &init_mm; trap_init()
|
/linux-4.4.14/arch/alpha/include/asm/ |
H A D | current.h | 7 #define current get_current() macro
|
H A D | processor.h | 13 * Returns current instruction pointer ("program counter"). 24 (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) 32 ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2) 58 ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
|
H A D | a.out-core.h | 28 dump->start_code = current->mm->start_code; aout_dump_thread() 29 dump->start_data = current->mm->start_data; aout_dump_thread() 31 dump->u_tsize = ((current->mm->end_code - dump->start_code) aout_dump_thread() 33 dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data) aout_dump_thread() 35 dump->u_ssize = (current->mm->start_stack - dump->start_stack aout_dump_thread()
|
H A D | tlbflush.h | 33 /* Flush just one page in the current TLB set. We need to be very 80 /* Flush current user mapping. */ 84 flush_tlb_current(current->active_mm); flush_tlb() 109 if (mm == current->active_mm) flush_tlb_mm() 121 if (mm == current->active_mm) flush_tlb_page()
|
/linux-4.4.14/include/asm-generic/ |
H A D | current.h | 7 #define current get_current() macro
|
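For reference, the generic fallback indexed above is tiny; a sketch of what <asm-generic/current.h> amounts to in this tree (include guards omitted, comment added here):

    /* Generic definition: current is the task recorded in the running thread's
     * thread_info; architectures without a spare register use this fallback. */
    #include <linux/thread_info.h>

    #define get_current() (current_thread_info()->task)
    #define current get_current()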
/linux-4.4.14/scripts/gdb/linux/ |
H A D | lists.py | 38 gdb.write('prev.next != current: ' 39 'current@{current_addr}={current} ' 42 current=c, 49 'current@{current_addr}={current}\n'.format( 51 current=c 56 gdb.write('next.prev != current: ' 57 'current@{current_addr}={current} ' 60 current=c, 67 'current@{current_addr}={current}\n'.format( 69 current=c
|
/linux-4.4.14/arch/powerpc/kernel/ |
H A D | swsusp.c | 13 #include <asm/current.h> 23 flush_fp_to_thread(current); save_processor_state() 24 flush_altivec_to_thread(current); save_processor_state() 25 flush_spe_to_thread(current); save_processor_state() 36 switch_mmu_context(current->active_mm, current->active_mm); restore_processor_state()
|
H A D | signal_32.c | 404 * Save the current user registers on the user stack. 415 flush_fp_to_thread(current); save_user_regs() 423 if (current->thread.used_vr) { save_user_regs() 424 flush_altivec_to_thread(current); save_user_regs() 425 if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state, save_user_regs() 438 * Note that the current VRSAVE value is in the SPR at this point. save_user_regs() 441 current->thread.vrsave = mfspr(SPRN_VRSAVE); save_user_regs() 442 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32])) save_user_regs() 445 if (copy_fpr_to_user(&frame->mc_fregs, current)) save_user_regs() 460 if (current->thread.used_vsr && ctx_has_vsx_region) { save_user_regs() 461 __giveup_vsx(current); save_user_regs() 462 if (copy_vsx_to_user(&frame->mc_vsregs, current)) save_user_regs() 469 if (current->thread.used_spe) { save_user_regs() 470 flush_spe_to_thread(current); save_user_regs() 471 if (__copy_to_user(&frame->mc_vregs, current->thread.evr, save_user_regs() 481 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) save_user_regs() 507 * Save the current user registers on the user stack. 529 flush_fp_to_thread(current); save_tm_user_regs() 532 if (save_general_regs(&current->thread.ckpt_regs, frame) save_tm_user_regs() 547 if (current->thread.used_vr) { save_tm_user_regs() 548 flush_altivec_to_thread(current); save_tm_user_regs() 549 if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state, save_tm_user_regs() 554 &current->thread.transact_vr, save_tm_user_regs() 559 &current->thread.vr_state, save_tm_user_regs() 576 current->thread.vrsave = mfspr(SPRN_VRSAVE); save_tm_user_regs() 577 if (__put_user(current->thread.vrsave, save_tm_user_regs() 581 if (__put_user(current->thread.transact_vrsave, save_tm_user_regs() 585 if (__put_user(current->thread.vrsave, save_tm_user_regs() 591 if (copy_fpr_to_user(&frame->mc_fregs, current)) save_tm_user_regs() 594 if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current)) save_tm_user_regs() 597 if (copy_fpr_to_user(&tm_frame->mc_fregs, current)) save_tm_user_regs() 608 if (current->thread.used_vsr) { save_tm_user_regs() 609 __giveup_vsx(current); save_tm_user_regs() 610 if (copy_vsx_to_user(&frame->mc_vsregs, current)) save_tm_user_regs() 614 current)) save_tm_user_regs() 617 if (copy_vsx_to_user(&tm_frame->mc_vsregs, current)) save_tm_user_regs() 628 if (current->thread.used_spe) { save_tm_user_regs() 629 flush_spe_to_thread(current); save_tm_user_regs() 630 if (__copy_to_user(&frame->mc_vregs, current->thread.evr, save_tm_user_regs() 639 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) save_tm_user_regs() 659 * Restore the current user register values from the user stack, 692 * current->thread.fpr/vr/evr. That way, if we get preempted restore_user_regs() 694 * tempted to save the current CPU state into the thread_struct restore_user_regs() 702 * current->thread when it next does altivec instructions restore_user_regs() 707 if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, restore_user_regs() 710 } else if (current->thread.used_vr) restore_user_regs() 711 memset(&current->thread.vr_state, 0, restore_user_regs() 715 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) restore_user_regs() 718 mtspr(SPRN_VRSAVE, current->thread.vrsave); restore_user_regs() 720 if (copy_fpr_from_user(current, &sr->mc_fregs)) restore_user_regs() 726 * current->thread when it next does VSX instruction. 
restore_user_regs() 734 if (copy_vsx_from_user(current, &sr->mc_vsregs)) restore_user_regs() 736 } else if (current->thread.used_vsr) restore_user_regs() 738 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; restore_user_regs() 742 * current->thread when it next does FP instructions restore_user_regs() 748 current->thread when it next does spe instructions */ restore_user_regs() 752 if (__copy_from_user(current->thread.evr, &sr->mc_vregs, restore_user_regs() 755 } else if (current->thread.used_spe) restore_user_regs() 756 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); restore_user_regs() 759 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG)) restore_user_regs() 768 * Restore the current user register values from the user stack, except for 790 err |= restore_general_regs(&current->thread.ckpt_regs, sr); restore_tm_user_regs() 792 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]); restore_tm_user_regs() 803 * current->thread.fpr/vr/evr. That way, if we get preempted restore_tm_user_regs() 805 * tempted to save the current CPU state into the thread_struct restore_tm_user_regs() 814 if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, restore_tm_user_regs() 816 __copy_from_user(&current->thread.transact_vr, restore_tm_user_regs() 820 } else if (current->thread.used_vr) { restore_tm_user_regs() 821 memset(&current->thread.vr_state, 0, restore_tm_user_regs() 823 memset(&current->thread.transact_vr, 0, restore_tm_user_regs() 828 if (__get_user(current->thread.vrsave, restore_tm_user_regs() 830 __get_user(current->thread.transact_vrsave, restore_tm_user_regs() 834 mtspr(SPRN_VRSAVE, current->thread.vrsave); restore_tm_user_regs() 839 if (copy_fpr_from_user(current, &sr->mc_fregs) || restore_tm_user_regs() 840 copy_transact_fpr_from_user(current, &tm_sr->mc_fregs)) restore_tm_user_regs() 850 if (copy_vsx_from_user(current, &sr->mc_vsregs) || restore_tm_user_regs() 851 copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs)) restore_tm_user_regs() 853 } else if (current->thread.used_vsr) restore_tm_user_regs() 855 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; restore_tm_user_regs() 856 current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; restore_tm_user_regs() 866 if (__copy_from_user(current->thread.evr, &sr->mc_vregs, restore_tm_user_regs() 869 } else if (current->thread.used_spe) restore_tm_user_regs() 870 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); restore_tm_user_regs() 873 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs restore_tm_user_regs() 893 current->thread.tm_texasr |= TEXASR_FS; restore_tm_user_regs() 895 tm_recheckpoint(&current->thread, msr); restore_tm_user_regs() 899 do_load_up_transact_fpu(&current->thread); restore_tm_user_regs() 900 regs->msr |= (MSR_FP | current->thread.fpexc_mode); restore_tm_user_regs() 904 do_load_up_transact_altivec(&current->thread); restore_tm_user_regs() 1021 if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { handle_rt_signal32() 1023 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp; handle_rt_signal32() 1050 current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ handle_rt_signal32() 1075 current->comm, current->pid, handle_rt_signal32() 1201 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked) sys_swapcontext() 1241 current->restart_block.fn = do_no_restart_syscall; sys_rt_sigreturn() 1300 current->comm, current->pid, sys_rt_sigreturn() 1303 force_sig(SIGSEGV, current); sys_rt_sigreturn() 1318 unsigned long new_dbcr0 = current->thread.debug.dbcr0; 
sys_debug_setcontext() 1333 current->thread.debug.dbcr1)) { sys_debug_setcontext() 1368 current->thread.debug.dbcr0 = new_dbcr0; sys_debug_setcontext() 1392 current->comm, current->pid, sys_debug_setcontext() 1395 force_sig(SIGSEGV, current); sys_debug_setcontext() 1446 if (vdso32_sigtramp && current->mm->context.vdso_base) { handle_signal32() 1448 tramp = current->mm->context.vdso_base + vdso32_sigtramp; handle_signal32() 1470 current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ handle_signal32() 1490 current->comm, current->pid, handle_signal32() 1514 current->restart_block.fn = do_no_restart_syscall; sys_sigreturn() 1562 current->comm, current->pid, sys_sigreturn() 1565 force_sig(SIGSEGV, current); sys_sigreturn()
|
H A D | signal_64.c | 115 if (current->thread.used_vr) { setup_sigcontext() 116 flush_altivec_to_thread(current); setup_sigcontext() 118 err |= __copy_to_user(v_regs, &current->thread.vr_state, setup_sigcontext() 129 current->thread.vrsave = mfspr(SPRN_VRSAVE); setup_sigcontext() 130 err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); setup_sigcontext() 134 flush_fp_to_thread(current); setup_sigcontext() 136 err |= copy_fpr_to_user(&sc->fp_regs, current); setup_sigcontext() 149 if (current->thread.used_vsr && ctx_has_vsx_region) { setup_sigcontext() 150 __giveup_vsx(current); setup_sigcontext() 152 err |= copy_vsx_to_user(v_regs, current); setup_sigcontext() 212 flush_fp_to_thread(current); setup_tm_sigcontexts() 219 if (current->thread.used_vr) { setup_tm_sigcontexts() 220 flush_altivec_to_thread(current); setup_tm_sigcontexts() 222 err |= __copy_to_user(v_regs, &current->thread.vr_state, setup_tm_sigcontexts() 229 &current->thread.transact_vr, setup_tm_sigcontexts() 233 &current->thread.vr_state, setup_tm_sigcontexts() 245 current->thread.vrsave = mfspr(SPRN_VRSAVE); setup_tm_sigcontexts() 246 err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); setup_tm_sigcontexts() 248 err |= __put_user(current->thread.transact_vrsave, setup_tm_sigcontexts() 251 err |= __put_user(current->thread.vrsave, setup_tm_sigcontexts() 260 err |= copy_fpr_to_user(&sc->fp_regs, current); setup_tm_sigcontexts() 262 err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current); setup_tm_sigcontexts() 264 err |= copy_fpr_to_user(&tm_sc->fp_regs, current); setup_tm_sigcontexts() 272 if (current->thread.used_vsr) { setup_tm_sigcontexts() 273 __giveup_vsx(current); setup_tm_sigcontexts() 277 err |= copy_vsx_to_user(v_regs, current); setup_tm_sigcontexts() 280 err |= copy_transact_vsx_to_user(tm_v_regs, current); setup_tm_sigcontexts() 282 err |= copy_vsx_to_user(tm_v_regs, current); setup_tm_sigcontexts() 296 &current->thread.ckpt_regs, GP_REGS_SIZE); setup_tm_sigcontexts() 354 * current->thread.fpr/vr. That way, if we get preempted restore_sigcontext() 356 * tempted to save the current CPU state into the thread_struct restore_sigcontext() 363 * This has to be done before copying stuff into current->thread.fpr/vr restore_sigcontext() 376 err |= __copy_from_user(&current->thread.vr_state, v_regs, restore_sigcontext() 378 else if (current->thread.used_vr) restore_sigcontext() 379 memset(&current->thread.vr_state, 0, 33 * sizeof(vector128)); restore_sigcontext() 382 err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); restore_sigcontext() 384 current->thread.vrsave = 0; restore_sigcontext() 386 mtspr(SPRN_VRSAVE, current->thread.vrsave); restore_sigcontext() 389 err |= copy_fpr_from_user(current, &sc->fp_regs); restore_sigcontext() 398 err |= copy_vsx_from_user(current, v_regs); restore_sigcontext() 401 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; restore_sigcontext() 425 err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs, restore_tm_sigcontexts() 437 err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]); restore_tm_sigcontexts() 456 err |= __get_user(current->thread.ckpt_regs.ctr, restore_tm_sigcontexts() 458 err |= __get_user(current->thread.ckpt_regs.link, restore_tm_sigcontexts() 460 err |= __get_user(current->thread.ckpt_regs.xer, restore_tm_sigcontexts() 462 err |= __get_user(current->thread.ckpt_regs.ccr, restore_tm_sigcontexts() 473 * current->thread.fpr/vr. 
That way, if we get preempted restore_tm_sigcontexts() 475 * tempted to save the current CPU state into the thread_struct restore_tm_sigcontexts() 482 * This has to be done before copying stuff into current->thread.fpr/vr restore_tm_sigcontexts() 499 err |= __copy_from_user(&current->thread.vr_state, v_regs, restore_tm_sigcontexts() 501 err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs, restore_tm_sigcontexts() 504 else if (current->thread.used_vr) { restore_tm_sigcontexts() 505 memset(&current->thread.vr_state, 0, 33 * sizeof(vector128)); restore_tm_sigcontexts() 506 memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128)); restore_tm_sigcontexts() 510 err |= __get_user(current->thread.vrsave, restore_tm_sigcontexts() 512 err |= __get_user(current->thread.transact_vrsave, restore_tm_sigcontexts() 516 current->thread.vrsave = 0; restore_tm_sigcontexts() 517 current->thread.transact_vrsave = 0; restore_tm_sigcontexts() 520 mtspr(SPRN_VRSAVE, current->thread.vrsave); restore_tm_sigcontexts() 523 err |= copy_fpr_from_user(current, &sc->fp_regs); restore_tm_sigcontexts() 524 err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs); restore_tm_sigcontexts() 534 err |= copy_vsx_from_user(current, v_regs); restore_tm_sigcontexts() 535 err |= copy_transact_vsx_from_user(current, tm_v_regs); restore_tm_sigcontexts() 538 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; restore_tm_sigcontexts() 539 current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; restore_tm_sigcontexts() 545 current->thread.tm_texasr |= TEXASR_FS; restore_tm_sigcontexts() 547 tm_recheckpoint(&current->thread, msr); restore_tm_sigcontexts() 551 do_load_up_transact_fpu(&current->thread); restore_tm_sigcontexts() 552 regs->msr |= (MSR_FP | current->thread.fpexc_mode); restore_tm_sigcontexts() 556 do_load_up_transact_altivec(&current->thread); restore_tm_sigcontexts() 635 &current->blocked, sizeof(sigset_t))) sys_swapcontext() 684 current->restart_block.fn = do_no_restart_syscall; sys_rt_sigreturn() 719 current->comm, current->pid, "rt_sigreturn", sys_rt_sigreturn() 722 force_sig(SIGSEGV, current); sys_rt_sigreturn() 769 current->thread.fp_state.fpscr = 0; handle_rt_signal64() 772 if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { handle_rt_signal64() 773 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; handle_rt_signal64() 823 current->comm, current->pid, "setup_rt_frame", handle_rt_signal64()
|
H A D | process.c | 81 * If we are saving the current thread's registers, and the giveup_fpu_maybe_transactional() 86 if (tsk == current && tsk->thread.regs && giveup_fpu_maybe_transactional() 99 * If we are saving the current thread's registers, and the giveup_altivec_maybe_transactional() 104 if (tsk == current && tsk->thread.regs && giveup_altivec_maybe_transactional() 139 * This should only ever be called for current or flush_fp_to_thread() 145 BUG_ON(tsk != current); flush_fp_to_thread() 160 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) enable_kernel_fp() 161 giveup_fpu_maybe_transactional(current); enable_kernel_fp() 176 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) enable_kernel_altivec() 177 giveup_altivec_maybe_transactional(current); enable_kernel_altivec() 196 BUG_ON(tsk != current); flush_altivec_to_thread() 212 if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) enable_kernel_vsx() 213 giveup_vsx(current); enable_kernel_vsx() 236 BUG_ON(tsk != current); flush_vsx_to_thread() 253 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) enable_kernel_spe() 254 giveup_spe(current); enable_kernel_spe() 269 BUG_ON(tsk != current); flush_spe_to_thread() 282 * and the current task has some state, discard it. 287 if (last_task_used_math == current) discard_lazy_cpu_state() 290 if (last_task_used_altivec == current) discard_lazy_cpu_state() 294 if (last_task_used_vsx == current) discard_lazy_cpu_state() 298 if (last_task_used_spe == current) discard_lazy_cpu_state() 311 current->thread.trap_nr = signal_code; do_send_trap() 321 force_sig_info(SIGTRAP, &info, current); do_send_trap() 329 current->thread.trap_nr = TRAP_HWBKPT; do_break() 345 force_sig_info(SIGTRAP, &info, current); do_break() 416 if ((current->thread.debug.dbcr0 & DBCR0_IDM) switch_booke_debug_regs() 555 * Use the current MSR TM suspended bit to track if we have tm_reclaim_thread() 565 * We do this using the current MSR, rather tracking it in tm_reclaim_thread() 578 * indicate their current validity. tm_reclaim_thread() 586 tm_reclaim_thread(&current->thread, current_thread_info(), cause); tm_reclaim_current() 597 * (current) FPRs into oldtask->thread.transact_fpr[]. tm_reclaim_task() 744 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr; restore_tm_state() 748 load_fp_state(&current->thread.fp_state); restore_tm_state() 749 regs->msr |= current->thread.fpexc_mode; restore_tm_state() 753 load_vr_state(&current->thread.vr_state); restore_tm_state() 867 old_thread = &current->thread; __switch_to() 1057 show_stack(current, (unsigned long *) regs->gpr[1]); show_regs() 1072 flush_ptrace_hw_breakpoint(current); flush_thread() 1074 set_debug_reg_defaults(&current->thread); flush_thread() 1085 * copy the current task into the new thread. 
1095 * flush but it removes the checkpointed state from the current CPU and arch_dup_task_struct() 1214 p->thread.dscr_inherit = current->thread.dscr_inherit; copy_thread() 1215 p->thread.dscr = current->thread.dscr; copy_thread() 1237 if (!current->thread.regs) { start_thread() 1238 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; start_thread() 1239 current->thread.regs = regs - 1; start_thread() 1310 current->thread.used_vsr = 0; start_thread() 1312 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state)); start_thread() 1313 current->thread.fp_save_area = NULL; start_thread() 1315 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state)); start_thread() 1316 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ start_thread() 1317 current->thread.vr_save_area = NULL; start_thread() 1318 current->thread.vrsave = 0; start_thread() 1319 current->thread.used_vr = 0; start_thread() 1322 memset(current->thread.evr, 0, sizeof(current->thread.evr)); start_thread() 1323 current->thread.acc = 0; start_thread() 1324 current->thread.spefscr = 0; start_thread() 1325 current->thread.used_spe = 0; start_thread() 1330 current->thread.tm_tfhar = 0; start_thread() 1331 current->thread.tm_texasr = 0; start_thread() 1332 current->thread.tm_tfiar = 0; start_thread() 1518 if (!p || p == current || p->state == TASK_RUNNING) get_wchan() 1546 int curr_frame = current->curr_ret_stack; show_stack() 1553 tsk = current; show_stack() 1555 if (tsk == current) show_stack() 1575 (void *)current->ret_stack[curr_frame].ret); show_stack() 1587 * We look for the "regshere" marker in the current frame. show_stack() 1633 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) arch_align_stack()
|
H A D | uprobes.c | 62 * @regs: reflects the saved user state of current task. 66 struct arch_uprobe_task *autask = &current->utask->autask; arch_uprobe_pre_xol() 68 autask->saved_trap_nr = current->thread.trap_nr; arch_uprobe_pre_xol() 69 current->thread.trap_nr = UPROBE_TRAP_NR; arch_uprobe_pre_xol() 70 regs->nip = current->utask->xol_vaddr; arch_uprobe_pre_xol() 72 user_enable_single_step(current); arch_uprobe_pre_xol() 114 struct uprobe_task *utask = current->utask; arch_uprobe_post_xol() 116 WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); arch_uprobe_post_xol() 118 current->thread.trap_nr = utask->autask.saved_trap_nr; arch_uprobe_post_xol() 129 user_disable_single_step(current); arch_uprobe_post_xol() 169 struct uprobe_task *utask = current->utask; arch_uprobe_abort_xol() 171 current->thread.trap_nr = utask->autask.saved_trap_nr; arch_uprobe_abort_xol() 174 user_disable_single_step(current); arch_uprobe_abort_xol()
|
/linux-4.4.14/arch/s390/include/asm/ |
H A D | current.h | 6 * Derived from "include/asm-i386/current.h" 16 #define current ((struct task_struct *const)S390_lowcore.current_task) macro
|
/linux-4.4.14/arch/m68k/include/asm/ |
H A D | current.h | 6 register struct task_struct *current __asm__("%a2"); 13 * current and lose _current_task. 24 #define current get_current() macro
|
/linux-4.4.14/arch/sparc/include/asm/ |
H A D | current.h | 1 /* include/asm/current.h 7 * Derived from "include/asm-s390/current.h" by 9 * Derived from "include/asm-i386/current.h" 17 register struct task_struct *current asm("g4"); 31 #define current __get_current() macro
|
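Several of the headers indexed here (sparc "g4", m68k "%a2", frv "gr29", microblaze "r31", mn10300 "e2", powerpc "r2") take the other approach and pin current in a global register; a minimal sketch of that pattern, using the sparc register named above rather than any one arch's exact header:

    /* Register-pinned variant: the compiler reserves one global register for
     * the task pointer, so reading current costs no memory access at all. */
    struct task_struct;
    register struct task_struct *current asm("g4");
    #define get_current() (current)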
H A D | mmu_context_32.h | 24 /* Switch the current MM context. */ 30 /* Activate a new MM instance for the current task. */
|
/linux-4.4.14/arch/frv/include/asm/ |
H A D | current.h | 0 /* current.h: FRV current task pointer 18 * dedicate GR29 to keeping the current task pointer 20 register struct task_struct *current asm("gr29"); 22 #define get_current() current
|
H A D | irq_regs.h | 16 * Per-cpu current frame pointer - the location of the last exception frame on 18 * - on FRV, GR28 is dedicated to keeping a pointer to the current exception
|
/linux-4.4.14/arch/um/kernel/ |
H A D | exec.c | 12 #include <asm/current.h> 25 arch_flush_thread(&current->thread.arch); flush_thread() 27 ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data); flush_thread() 28 ret = ret || unmap(&current->mm->context.id, STUB_END, flush_thread() 33 force_sig(SIGKILL, current); flush_thread() 38 __switch_mm(&current->mm->context.id); flush_thread() 45 current->ptrace &= ~PT_DTRACE; start_thread()
|
H A D | trap.c | 11 #include <asm/current.h> 27 struct mm_struct *mm = current->mm; handle_page_fault() 78 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) handle_page_fault() 94 current->maj_flt++; handle_page_fault() 96 current->min_flt++; handle_page_fault() 143 struct task_struct *tsk = current; show_segv_info() 169 current->thread.arch.faultinfo = fi; bad_segv() 170 force_sig_info(SIGSEGV, &si, current); bad_segv() 175 force_sigsegv(SIGSEGV, current); fatal_sigsegv() 176 do_signal(&current->thread.regs); fatal_sigsegv() 213 current->thread.segv_regs = container_of(regs, struct pt_regs, regs); segv() 219 else if (current->mm == NULL) { segv() 242 catcher = current->thread.fault_catcher; segv() 246 current->thread.fault_addr = (void *) address; segv() 249 else if (current->thread.fault_addr != NULL) segv() 267 current->thread.arch.faultinfo = fi; segv() 268 force_sig_info(SIGBUS, &si, current); segv() 273 current->thread.arch.faultinfo = fi; segv() 274 force_sig_info(SIGSEGV, &si, current); segv() 279 current->thread.segv_regs = NULL; segv() 310 current->thread.arch.faultinfo = *fi; relay_signal() 320 force_sig_info(sig, &clean_si, current); relay_signal() 325 if (current->thread.fault_catcher != NULL) bus_handler() 326 UML_LONGJMP(current->thread.fault_catcher, 1); bus_handler()
|
H A D | signal.c | 28 if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) handle_signal() 55 sp = current->sas_ss_sp + current->sas_ss_size; handle_signal() 103 if (current->ptrace & PT_DTRACE) do_signal() 104 current->thread.singlestep_syscall = do_signal() 105 is_syscall(PT_REGS_IP(&current->thread.regs)); do_signal()
|
H A D | process.c | 24 #include <asm/current.h> 89 arch_switch_to(current); __switch_to() 91 return current->thread.prev_sched; __switch_to() 96 struct pt_regs *regs = &current->thread.regs; interrupt_end() 112 return task_pid_nr(current); get_current_pid() 124 if (current->thread.prev_sched != NULL) new_thread_handler() 125 schedule_tail(current->thread.prev_sched); new_thread_handler() 126 current->thread.prev_sched = NULL; new_thread_handler() 128 fn = current->thread.request.u.thread.proc; new_thread_handler() 129 arg = current->thread.request.u.thread.arg; new_thread_handler() 135 userspace(&current->thread.regs.regs); new_thread_handler() 143 schedule_tail(current->thread.prev_sched); fork_handler() 150 arch_switch_to(current); fork_handler() 152 current->thread.prev_sched = NULL; fork_handler() 154 userspace(&current->thread.regs.regs); fork_handler() 161 int kthread = current->flags & PF_KTHREAD; copy_thread() 175 arch_copy_thread(&current->thread.arch, &p->thread.arch); copy_thread() 339 struct task_struct *task = t ? t : current; singlestepping() 360 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) arch_align_stack() 371 if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING)) get_wchan()
|
H A D | sysrq.c | 33 struct pt_regs *segv_regs = current->thread.segv_regs; show_stack() 57 dump_trace(current, &stackops, NULL); show_stack()
|
H A D | syscall.c | 12 #include <asm/current.h>
|
/linux-4.4.14/arch/um/drivers/ |
H A D | mconsole_kern.h | 27 #define CONFIG_CHUNK(str, size, current, chunk, end) \ 29 current += strlen(chunk); \ 30 if(current >= size) \ 37 current++; \
|
/linux-4.4.14/arch/c6x/include/asm/ |
H A D | page.h | 6 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
|
H A D | switch_to.h | 26 current->thread.wchan = (u_long) __builtin_return_address(0); \ 30 current->thread.wchan = 0; \
|
/linux-4.4.14/arch/microblaze/include/asm/ |
H A D | current.h | 15 * Register used to hold the current task pointer while in the kernel. 22 * Dedicate r31 to keeping the current task pointer 24 register struct task_struct *current asm("r31"); 26 # define get_current() current
|
H A D | elf.h | 28 set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
|
/linux-4.4.14/arch/mips/power/ |
H A D | cpu.c | 22 save_fp(current); save_processor_state() 24 save_dsp(current); save_processor_state() 32 restore_fp(current); restore_processor_state() 34 restore_dsp(current); restore_processor_state()
|
/linux-4.4.14/arch/mn10300/include/asm/ |
H A D | current.h | 17 * dedicate E2 to keeping the current task pointer 21 register struct task_struct *const current asm("e2") __attribute__((used)); 23 #define get_current() current 34 #define current get_current() macro
|
/linux-4.4.14/arch/parisc/include/asm/ |
H A D | current.h | 13 #define current get_current() macro
|
/linux-4.4.14/arch/cris/include/asm/ |
H A D | current.h | 13 #define current get_current() macro
|
H A D | tlbflush.h | 12 * - flush_tlb() flushes the current mm struct TLBs 36 flush_tlb_mm(current->mm); flush_tlb()
|
/linux-4.4.14/arch/m32r/include/asm/ |
H A D | current.h | 13 #define current (get_current()) macro
|
/linux-4.4.14/arch/avr32/include/asm/ |
H A D | current.h | 13 #define current get_current() macro
|
/linux-4.4.14/drivers/tty/ |
H A D | tty_audit.c | 67 struct task_struct *tsk = current; tty_audit_log() 92 * the current task. @buf->mutex must be locked. 110 * Only needs to be called if current->signal->tty_audit_buf != %NULL. 116 buf = current->signal->tty_audit_buf; tty_audit_exit() 117 current->signal->tty_audit_buf = NULL; tty_audit_exit() 131 * Set up TTY audit state in @sig from current. @sig needs no locking. 135 sig->audit_tty = current->signal->audit_tty; tty_audit_fork() 136 sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd; tty_audit_fork() 148 spin_lock_irqsave(&current->sighand->siglock, flags); tty_audit_tiocsti() 149 should_audit = current->signal->audit_tty; tty_audit_tiocsti() 150 buf = current->signal->tty_audit_buf; tty_audit_tiocsti() 153 spin_unlock_irqrestore(&current->sighand->siglock, flags); tty_audit_tiocsti() 169 auid = audit_get_loginuid(current); tty_audit_tiocsti() 170 sessionid = audit_get_sessionid(current); tty_audit_tiocsti() 176 * tty_audit_push_current - Flush current's pending audit data 184 struct task_struct *tsk = current; tty_audit_push_current() 227 spin_lock_irqsave(&current->sighand->siglock, flags); tty_audit_buf_get() 228 if (likely(!current->signal->audit_tty)) tty_audit_buf_get() 230 buf = current->signal->tty_audit_buf; tty_audit_buf_get() 235 spin_unlock_irqrestore(&current->sighand->siglock, flags); tty_audit_buf_get() 245 spin_lock_irqsave(&current->sighand->siglock, flags); tty_audit_buf_get() 246 if (!current->signal->audit_tty) tty_audit_buf_get() 248 buf = current->signal->tty_audit_buf; tty_audit_buf_get() 250 current->signal->tty_audit_buf = buf2; tty_audit_buf_get() 257 spin_unlock_irqrestore(&current->sighand->siglock, flags); tty_audit_buf_get() 279 spin_lock_irqsave(&current->sighand->siglock, flags); tty_audit_add_data() 280 audit_log_tty_passwd = current->signal->audit_tty_log_passwd; tty_audit_add_data() 281 spin_unlock_irqrestore(&current->sighand->siglock, flags); tty_audit_add_data() 323 * Make sure no audit data is pending for @tty on the current process. 330 spin_lock_irqsave(&current->sighand->siglock, flags); tty_audit_push() 331 if (likely(!current->signal->audit_tty)) { tty_audit_push() 332 spin_unlock_irqrestore(&current->sighand->siglock, flags); tty_audit_push() 335 buf = current->signal->tty_audit_buf; tty_audit_push() 338 spin_unlock_irqrestore(&current->sighand->siglock, flags); tty_audit_push()
|
/linux-4.4.14/include/linux/platform_data/ |
H A D | ad7793.h | 55 * enum ad7793_current_source_direction - AD7793 excitation current direction 57 * IOUT1, current source IEXC2 connected to pin IOUT2. 59 * IOUT1, current source IEXC1 connected to pin IOUT2. 60 * @AD7793_IEXEC1_IEXEC2_IOUT1: Both current sources connected to pin IOUT1. 61 * Only valid when the current sources are set to 10 uA or 210 uA. 62 * @AD7793_IEXEC1_IEXEC2_IOUT2: Both current sources connected to Pin IOUT2. 63 * Only valid when the current sources are set to 10 uA or 210 uA. 73 * enum ad7793_excitation_current - AD7793 excitation current selection 74 * @AD7793_IX_DISABLED: Excitation current Disabled. 75 * @AD7793_IX_10uA: Enable 10 micro-ampere excitation current. 76 * @AD7793_IX_210uA: Enable 210 micro-ampere excitation current. 77 * @AD7793_IX_1mA: Enable 1 milli-Ampere excitation current. 89 * @burnout_current: If set to true the 100nA burnout current is enabled. 96 * @exitation_current: Excitation current selection 97 * @current_source_direction: Excitation current direction selection
|
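The kernel-doc above describes fields of the AD7793 platform data; a hypothetical board-file initializer tying the documented enums to those fields (the struct tag ad7793_platform_data is assumed from the header name, and the chosen values are only an example):

    #include <linux/platform_data/ad7793.h>

    /* Hypothetical wiring: both current sources on IOUT1 at 210 uA, a combination
     * the comment above marks as valid; burnout-current detection left disabled. */
    static const struct ad7793_platform_data board_ad7793_pdata = {
    	.exitation_current        = AD7793_IX_210uA,
    	.current_source_direction = AD7793_IEXEC1_IEXEC2_IOUT1,
    	.burnout_current          = false,
    };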
H A D | ad7791.h | 7 * @burnout_current: If set to true the 100mA burnout current is enabled.
|
H A D | lp8755.h | 23 *OCP : over current protect activated 27 *I_LOAD : current measured
|
/linux-4.4.14/tools/power/cpupower/lib/ |
H A D | sysfs.c | 274 struct cpufreq_available_governors *current = NULL; sysfs_get_freq_available_governors() local 289 if (current) { sysfs_get_freq_available_governors() 290 current->next = malloc(sizeof(*current)); sysfs_get_freq_available_governors() 291 if (!current->next) sysfs_get_freq_available_governors() 293 current = current->next; sysfs_get_freq_available_governors() 298 current = first; sysfs_get_freq_available_governors() 300 current->first = first; sysfs_get_freq_available_governors() 301 current->next = NULL; sysfs_get_freq_available_governors() 303 current->governor = malloc(i - pos + 1); sysfs_get_freq_available_governors() 304 if (!current->governor) sysfs_get_freq_available_governors() 307 memcpy(current->governor, linebuf + pos, i - pos); sysfs_get_freq_available_governors() 308 current->governor[i - pos] = '\0'; sysfs_get_freq_available_governors() 317 current = first->next; sysfs_get_freq_available_governors() 321 first = current; sysfs_get_freq_available_governors() 330 struct cpufreq_available_frequencies *current = NULL; sysfs_get_available_frequencies() local 348 if (current) { sysfs_get_available_frequencies() 349 current->next = malloc(sizeof(*current)); sysfs_get_available_frequencies() 350 if (!current->next) sysfs_get_available_frequencies() 352 current = current->next; sysfs_get_available_frequencies() 357 current = first; sysfs_get_available_frequencies() 359 current->first = first; sysfs_get_available_frequencies() 360 current->next = NULL; sysfs_get_available_frequencies() 364 if (sscanf(one_value, "%lu", &current->frequency) != 1) sysfs_get_available_frequencies() 375 current = first->next; sysfs_get_available_frequencies() 377 first = current; sysfs_get_available_frequencies() 386 struct cpufreq_affected_cpus *current = NULL; sysfs_get_cpu_list() local 403 if (current) { sysfs_get_cpu_list() 404 current->next = malloc(sizeof(*current)); sysfs_get_cpu_list() 405 if (!current->next) sysfs_get_cpu_list() 407 current = current->next; sysfs_get_cpu_list() 412 current = first; sysfs_get_cpu_list() 414 current->first = first; sysfs_get_cpu_list() 415 current->next = NULL; sysfs_get_cpu_list() 420 if (sscanf(one_value, "%u", &current->cpu) != 1) sysfs_get_cpu_list() 431 current = first->next; sysfs_get_cpu_list() 433 first = current; sysfs_get_cpu_list() 451 struct cpufreq_stats *current = NULL; sysfs_get_freq_stats() local 470 if (current) { sysfs_get_freq_stats() 471 current->next = malloc(sizeof(*current)); sysfs_get_freq_stats() 472 if (!current->next) sysfs_get_freq_stats() 474 current = current->next; sysfs_get_freq_stats() 479 current = first; sysfs_get_freq_stats() 481 current->first = first; sysfs_get_freq_stats() 482 current->next = NULL; sysfs_get_freq_stats() 487 &current->frequency, sysfs_get_freq_stats() 488 &current->time_in_state) != 2) sysfs_get_freq_stats() 491 *total_time = *total_time + current->time_in_state; sysfs_get_freq_stats() 500 current = first->next; sysfs_get_freq_stats() 502 first = current; sysfs_get_freq_stats()
|
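All four sysfs_get_*() helpers above grow their result list the same way: keep a head pointer (first), append behind a tail pointer (current), and give every node a ->first back-pointer to the head. A reduced sketch of just that append step, with the cpufreq-specific parsing stripped out (the struct here is illustrative, not the library's):

    #include <stdlib.h>

    struct node { struct node *first, *next; unsigned long value; };

    /* Append one node; returns the (possibly new) head, or NULL on allocation failure. */
    static struct node *append(struct node **head, struct node **tail, unsigned long v)
    {
    	struct node *n = malloc(sizeof(*n));
    	if (!n)
    		return NULL;
    	if (*tail)
    		(*tail)->next = n;	/* hook onto the existing tail ... */
    	else
    		*head = n;		/* ... or start the list */
    	n->first = *head;		/* every node can reach the head */
    	n->next = NULL;
    	n->value = v;
    	*tail = n;
    	return *head;
    }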
/linux-4.4.14/arch/ia64/include/asm/ |
H A D | current.h | 12 * In kernel mode, thread pointer (r13) is used to point to the current task 15 #define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP)) macro
|
H A D | switch_to.h | 67 /* "next" in old context is "current" in new context */ \ 68 if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \ 69 (task_cpu(current) != \ 70 task_thread_info(current)->last_cpu))) { \ 71 platform_migrate(current); \ 72 task_thread_info(current)->last_cpu = task_cpu(current); \
|
/linux-4.4.14/arch/m68k/mm/ |
H A D | fault.c | 25 siginfo.si_signo = current->thread.signo; send_fault_sig() 26 siginfo.si_code = current->thread.code; send_fault_sig() 27 siginfo.si_addr = (void *)current->thread.faddr; send_fault_sig() 33 &siginfo, current); send_fault_sig() 40 // &siginfo, current); send_fault_sig() 72 struct mm_struct *mm = current->mm; do_page_fault() 142 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) do_page_fault() 162 current->maj_flt++; do_page_fault() 164 current->min_flt++; do_page_fault() 196 current->thread.signo = SIGBUS; do_page_fault() 197 current->thread.faddr = address; do_page_fault() 201 current->thread.signo = SIGBUS; do_page_fault() 202 current->thread.code = BUS_ADRERR; do_page_fault() 203 current->thread.faddr = address; do_page_fault() 207 current->thread.signo = SIGSEGV; do_page_fault() 208 current->thread.code = SEGV_MAPERR; do_page_fault() 209 current->thread.faddr = address; do_page_fault() 213 current->thread.signo = SIGSEGV; do_page_fault() 214 current->thread.code = SEGV_ACCERR; do_page_fault() 215 current->thread.faddr = address; do_page_fault()
|
/linux-4.4.14/include/linux/ |
H A D | context_tracking.h | 58 * ct_state() - return the current context tracking state if known 60 * Returns the current cpu's context tracking state if context tracking 90 vtime_guest_enter(current); guest_enter() 92 current->flags |= PF_VCPU; guest_enter() 104 vtime_guest_exit(current); guest_exit() 106 current->flags &= ~PF_VCPU; guest_exit() 117 vtime_account_system(current); guest_enter() 118 current->flags |= PF_VCPU; guest_enter() 124 vtime_account_system(current); guest_exit() 125 current->flags &= ~PF_VCPU; guest_exit()
|
H A D | personality.h | 14 #define set_personality(pers) (current->personality = (pers))
|
H A D | delayacct.h | 26 * Used to set current->delays->flags 55 if (current->delays) delayacct_set_flag() 56 current->delays->flags |= flag; delayacct_set_flag() 61 if (current->delays) delayacct_clear_flag() 62 current->delays->flags &= ~flag; delayacct_clear_flag() 86 if (current->delays) delayacct_blkio_start() 92 if (current->delays) delayacct_blkio_end() 114 if (current->delays) delayacct_freepages_start() 120 if (current->delays) delayacct_freepages_end()
|
H A D | perf_regs.h | 37 regs_user->regs = task_pt_regs(current); perf_get_regs_user() 38 regs_user->abi = perf_reg_abi(current); perf_get_regs_user()
|
H A D | migrate_mode.h | 5 * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
|
H A D | ks0108.h | 34 /* Set the controller's current display state (0..1) */ 37 /* Set the controller's current startline (0..63) */ 40 /* Set the controller's current address (0..63) */ 43 /* Set the controller's current page (0..7) */
|
H A D | cpuset.h | 47 #define cpuset_current_mems_allowed (current->mems_allowed) 84 return task_spread_page(current); cpuset_do_page_mem_spread() 89 return task_spread_slab(current); cpuset_do_slab_mem_spread() 110 return read_seqcount_begin(&current->mems_allowed_seq); read_mems_allowed_begin() 124 return read_seqcount_retry(&current->mems_allowed_seq, seq); read_mems_allowed_retry() 131 task_lock(current); set_mems_allowed() 133 write_seqcount_begin(&current->mems_allowed_seq); set_mems_allowed() 134 current->mems_allowed = nodemask; set_mems_allowed() 135 write_seqcount_end(&current->mems_allowed_seq); set_mems_allowed() 137 task_unlock(current); set_mems_allowed()
|
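read_mems_allowed_begin()/read_mems_allowed_retry() above are the read side of a seqcount protecting current->mems_allowed; a minimal sketch of the retry loop a caller would use (the helper name here is made up for illustration):

    #include <linux/cpuset.h>

    /* Illustrative only: take a self-consistent snapshot of the allowed node mask. */
    static nodemask_t snapshot_mems_allowed(void)
    {
    	nodemask_t mask;
    	unsigned int seq;

    	do {
    		seq = read_mems_allowed_begin();	/* open the read section */
    		mask = cpuset_current_mems_allowed;	/* i.e. current->mems_allowed */
    	} while (read_mems_allowed_retry(seq));		/* raced with set_mems_allowed(): retry */

    	return mask;
    }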
H A D | tracehook.h | 34 * Some tracehook_*() inlines take arguments that the current tracing 61 int ptrace = current->ptrace; ptrace_report_syscall() 73 if (current->exit_code) { ptrace_report_syscall() 74 send_sig(current->exit_code, current, 1); ptrace_report_syscall() 75 current->exit_code = 0; ptrace_report_syscall() 78 return fatal_signal_pending(current); ptrace_report_syscall() 83 * @regs: user register state of current task 86 * current task has just entered the kernel for a system call. 108 * @regs: user register state of current task 112 * current task has just finished an attempted system call. Full 127 user_single_step_siginfo(current, regs, &info); tracehook_report_syscall_exit() 128 force_sig_info(SIGTRAP, &info, current); tracehook_report_syscall_exit() 171 * @regs: user-mode registers of @current task 190 if (unlikely(current->task_works)) tracehook_notify_resume()
|
H A D | hardirq.h | 37 account_irq_enter_time(current); \ 53 account_irq_exit_time(current); \
|
H A D | task_io_accounting_ops.h | 12 current->ioac.read_bytes += bytes; task_io_account_read() 26 current->ioac.write_bytes += bytes; task_io_account_write() 40 current->ioac.cancelled_write_bytes += bytes; task_io_account_cancelled_write()
|
H A D | amifd.h | 47 int track; /* current track (-1 == unknown) */ 48 unsigned char *trackbuf; /* current track (kmaloc()'d */ 57 int status; /* current error code for unit */
|
H A D | freezer.h | 57 if (likely(!freezing(current))) try_to_freeze_unsafe() 64 if (!(current->flags & PF_NOFREEZE)) try_to_freeze() 96 * freezer_do_not_count - tell freezer to ignore %current 98 * Tell freezers to ignore the current task when determining whether the 99 * target frozen state is reached. IOW, the current task will be 108 current->flags |= PF_FREEZER_SKIP; freezer_do_not_count() 112 * freezer_count - tell freezer to stop ignoring %current 114 * Undo freezer_do_not_count(). It tells freezers that %current should be 120 current->flags &= ~PF_FREEZER_SKIP; freezer_count() 133 current->flags &= ~PF_FREEZER_SKIP; freezer_count_unsafe()
|
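freezer_do_not_count()/freezer_count() above bracket a long sleep so the freezer does not have to wait for this task; a hedged sketch of the usual wrapping (the completion is just a stand-in for any long wait):

    #include <linux/freezer.h>
    #include <linux/completion.h>

    /* Sketch: let the system freeze while we block, then catch up afterwards. */
    static void wait_freezer_friendly(struct completion *done)
    {
    	freezer_do_not_count();		/* freezer may proceed without us */
    	wait_for_completion(done);	/* the actual long sleep */
    	freezer_count();		/* rejoin; freeze here if one is pending */
    }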
H A D | ptrace.h | 145 if (unlikely(ptrace_event_enabled(current, event))) { ptrace_event() 146 current->ptrace_message = message; ptrace_event() 150 if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED) ptrace_event() 151 send_sig(SIGTRAP, current, 0); ptrace_event() 178 ns = task_active_pid_ns(rcu_dereference(current->parent)); ptrace_event_pid() 194 * Called with current's siglock and write_lock_irq(&tasklist_lock) held. 204 if (unlikely(ptrace) && current->ptrace) { ptrace_init_task() 205 child->ptrace = current->ptrace; ptrace_init_task() 206 __ptrace_link(child, current->parent); ptrace_init_task() 276 * @task: either current or a task stopped in %TASK_TRACED 290 * @task: either current or a task stopped in %TASK_TRACED 319 * @task: either current or a task stopped in %TASK_TRACED 349 * @code: current->exit_code value ptrace will stop with 370 * @code: current->exit_code value ptrace will stop with 385 #define current_pt_regs() task_pt_regs(current) 393 * unlike current_pt_regs(), this one is equal to task_pt_regs(current) 398 #define signal_pt_regs() task_pt_regs(current)
|
/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/linux/ |
H A D | linux-prim.c | 79 spin_lock_irqsave(&current->sighand->siglock, flags); cfs_block_allsigs() 80 old = current->blocked; cfs_block_allsigs() 81 sigfillset(&current->blocked); cfs_block_allsigs() 83 spin_unlock_irqrestore(&current->sighand->siglock, flags); cfs_block_allsigs() 94 spin_lock_irqsave(&current->sighand->siglock, flags); cfs_block_sigs() 95 old = current->blocked; cfs_block_sigs() 96 sigaddsetmask(&current->blocked, sigs); cfs_block_sigs() 98 spin_unlock_irqrestore(&current->sighand->siglock, flags); cfs_block_sigs() 109 spin_lock_irqsave(&current->sighand->siglock, flags); cfs_block_sigsinv() 110 old = current->blocked; cfs_block_sigsinv() 111 sigaddsetmask(&current->blocked, ~sigs); cfs_block_sigsinv() 113 spin_unlock_irqrestore(&current->sighand->siglock, flags); cfs_block_sigsinv() 124 spin_lock_irqsave(&current->sighand->siglock, flags); cfs_restore_sigs() 125 current->blocked = old; cfs_restore_sigs() 127 spin_unlock_irqrestore(&current->sighand->siglock, flags); cfs_restore_sigs() 134 return signal_pending(current); cfs_signal_pending() 143 spin_lock_irqsave(&current->sighand->siglock, flags); cfs_clear_sigpending() 144 clear_tsk_thread_flag(current, TIF_SIGPENDING); cfs_clear_sigpending() 145 spin_unlock_irqrestore(&current->sighand->siglock, flags); cfs_clear_sigpending()
|
/linux-4.4.14/arch/h8300/kernel/ |
H A D | traps.c | 46 current->thread.esp0 = ssp; set_esp0() 60 pr_info("COMM=%s PID=%d\n", current->comm, current->pid); dump() 61 if (current->mm) { dump() 63 (int) current->mm->start_code, dump() 64 (int) current->mm->end_code, dump() 65 (int) current->mm->start_data, dump() 66 (int) current->mm->end_data, dump() 67 (int) current->mm->end_data, dump() 68 (int) current->mm->brk); dump() 70 (int) current->mm->start_stack, dump() 71 (int) PAGE_SIZE+(unsigned long)current); dump() 92 if (STACK_MAGIC != *(unsigned long *)((unsigned long)current+PAGE_SIZE)) dump()
|
/linux-4.4.14/arch/um/kernel/skas/ |
H A D | process.c | 23 cpu_tasks[0].task = current; start_kernel_proc() 48 if (current->mm == NULL) current_stub_stack() 51 return current->mm->context.id.stack; current_stub_stack()
|
/linux-4.4.14/arch/powerpc/include/asm/ |
H A D | current.h | 28 #define current get_current() macro 33 * We keep `current' in r2 for speed. 35 register struct task_struct *current asm ("r2");
|
/linux-4.4.14/arch/parisc/kernel/ |
H A D | ftrace.c | 29 if (!current->ret_stack) push_return_trace() 33 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { push_return_trace() 34 atomic_inc(&current->trace_overrun); push_return_trace() 38 index = ++current->curr_ret_stack; push_return_trace() 40 current->ret_stack[index].ret = ret; push_return_trace() 41 current->ret_stack[index].func = func; push_return_trace() 42 current->ret_stack[index].calltime = time; push_return_trace() 53 index = current->curr_ret_stack; pop_return_trace() 64 *ret = current->ret_stack[index].ret; pop_return_trace() 65 trace->func = current->ret_stack[index].func; pop_return_trace() 66 trace->calltime = current->ret_stack[index].calltime; pop_return_trace() 67 trace->overrun = atomic_read(&current->trace_overrun); pop_return_trace() 70 current->curr_ret_stack--; pop_return_trace() 107 * in current thread info. 118 if (unlikely(atomic_read(&current->tracing_graph_pause))) prepare_ftrace_return() 144 current->curr_ret_stack--; prepare_ftrace_return() 170 range compared to current stack pointer? */ ftrace_function_trampoline()
|
H A D | sys_parisc32.c | 22 current->comm, current->pid, r20); sys32_unimplemented()
|
H A D | traps.c | 153 parisc_show_stack(current, NULL, regs); show_regs() 192 t = task ? task : current; parisc_show_stack() 198 if (t == current) { parisc_show_stack() 211 unwind_frame_init(&info, current, &r); parisc_show_stack() 239 current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); die_if_kernel() 270 current->comm, task_pid_nr(current), str, err); die_if_kernel() 273 if (current->thread.flags & PARISC_KERNEL_DEATH) { die_if_kernel() 278 current->thread.flags |= PARISC_KERNEL_DEATH; die_if_kernel() 307 force_sig_info(SIGTRAP, &si, current); handle_gdb_break() 331 task_pid_nr(current), current->comm); handle_break() 459 unwind_frame_init(&info, current, regs); parisc_terminate() 614 force_sig_info(SIGILL, &si, current); handle_interruption() 622 force_sig_info(SIGFPE, &si, current); handle_interruption() 635 force_sig_info(SIGFPE, &si, current); handle_interruption() 723 down_read(&current->mm->mmap_sem); handle_interruption() 724 vma = find_vma(current->mm,regs->iaoq[0]); handle_interruption() 731 up_read(&current->mm->mmap_sem); handle_interruption() 734 up_read(&current->mm->mmap_sem); handle_interruption() 751 force_sig_info(SIGSEGV, &si, current); handle_interruption() 763 task_pid_nr(current), current->comm); handle_interruption() 769 force_sig_info(SIGBUS, &si, current); handle_interruption() 783 task_pid_nr(current), current->comm); handle_interruption() 788 force_sig_info(SIGSEGV, &si, current); handle_interruption()
|
/linux-4.4.14/drivers/staging/dgnc/ |
H A D | dgnc_utils.c | 17 return signal_pending(current); dgnc_ms_sleep()
|
/linux-4.4.14/arch/x86/include/asm/ |
H A D | current.h | 17 #define current get_current() macro
|
H A D | irq_regs.h | 2 * Per-cpu current frame pointer - the location of the last exception frame on
|
/linux-4.4.14/fs/nfsd/ |
H A D | auth.h | 11 * Set the current process's fsuid/fsgid etc to those of the NFS
|
H A D | current_stateid.h | 9 * functions to set current state id 17 * functions to consume current state id
|
/linux-4.4.14/kernel/locking/ |
H A D | rwsem.h | 4 sem->owner = current; rwsem_set_owner()
|
H A D | rtmutex-debug.c | 79 if (task && task != current) { debug_rt_mutex_deadlock() 108 printk("%s/%d is deadlocking current task %s/%d\n\n", debug_rt_mutex_print_deadlock() 110 current->comm, task_pid_nr(current)); debug_rt_mutex_print_deadlock() 113 current->comm, task_pid_nr(current)); debug_rt_mutex_print_deadlock() 120 debug_show_held_locks(current); debug_rt_mutex_print_deadlock() 126 printk("\n%s/%d's [current] stackdump:\n\n", debug_rt_mutex_print_deadlock() 127 current->comm, task_pid_nr(current)); debug_rt_mutex_print_deadlock() 142 DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); debug_rt_mutex_unlock()
|
H A D | spinlock_debug.c | 60 current->comm, task_pid_nr(current)); spin_dump() 84 SPIN_BUG_ON(lock->owner == current, lock, "recursion"); debug_spin_lock_before() 92 lock->owner = current; debug_spin_lock_after() 99 SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); debug_spin_unlock() 168 msg, raw_smp_processor_id(), current->comm, rwlock_bug() 169 task_pid_nr(current), lock); rwlock_bug() 193 raw_smp_processor_id(), current->comm, 194 current->pid, lock); 229 RWLOCK_BUG_ON(lock->owner == current, lock, "recursion"); debug_write_lock_before() 237 lock->owner = current; debug_write_lock_after() 243 RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner"); debug_write_unlock() 268 raw_smp_processor_id(), current->comm, 269 current->pid, lock);
|
/linux-4.4.14/include/uapi/linux/ |
H A D | gen_stats.h | 33 * @bps: current byte rate 34 * @pps: current packet rate 43 * @bps: current byte rate 44 * @pps: current packet rate
|
H A D | nvram.h | 10 /* for all current systems, this is where NVRAM starts */
|
/linux-4.4.14/arch/mips/cavium-octeon/ |
H A D | cpu.c | 16 #include <asm/current.h> 29 prefetch(&current->thread.cp2); cnmips_cu2_call() 31 KSTK_STATUS(current) |= ST0_CU2; cnmips_cu2_call() 34 octeon_cop2_restore(&(current->thread.cp2)); cnmips_cu2_call()
|
/linux-4.4.14/arch/ia64/include/uapi/asm/ |
H A D | fcntl.h | 9 (personality(current->personality) != PER_LINUX32)
|
/linux-4.4.14/arch/xtensa/include/asm/ |
H A D | current.h | 2 * include/asm-xtensa/current.h 25 #define current get_current() macro
|
H A D | switch_to.h | 13 * checking that n isn't the current task, in which case it does nothing.
|
/linux-4.4.14/arch/arc/include/asm/ |
H A D | current.h | 20 #define current (curr_arc) macro 23 #include <asm-generic/current.h>
|
/linux-4.4.14/arch/sh/include/asm/ |
H A D | stackprotector.h | 23 current->stack_canary = canary; boot_init_stack_canary() 24 __stack_chk_guard = current->stack_canary; boot_init_stack_canary()
|
/linux-4.4.14/arch/arm/kernel/ |
H A D | perf_regs.c | 36 regs_user->regs = task_pt_regs(current); perf_get_regs_user() 37 regs_user->abi = perf_reg_abi(current); perf_get_regs_user()
|
/linux-4.4.14/arch/mips/loongson64/loongson-3/ |
H A D | cop2-ex.c | 19 #include <asm/current.h> 37 KSTK_STATUS(current) |= (ST0_CU1 | ST0_CU2); loongson_cu2_call() 39 KSTK_STATUS(current) |= ST0_FR; loongson_cu2_call() 41 KSTK_STATUS(current) &= ~ST0_FR; loongson_cu2_call() 46 _init_fpu(current->thread.fpu.fcr31); loongson_cu2_call() 49 _restore_fp(current); loongson_cu2_call()
|
/linux-4.4.14/kernel/ |
H A D | delayacct.c | 58 spin_lock_irqsave(&current->delays->lock, flags); delayacct_end() 61 spin_unlock_irqrestore(&current->delays->lock, flags); delayacct_end() 67 current->delays->blkio_start = ktime_get_ns(); __delayacct_blkio_start() 72 if (current->delays->flags & DELAYACCT_PF_SWAPIN) __delayacct_blkio_end() 74 delayacct_end(&current->delays->blkio_start, __delayacct_blkio_end() 75 &current->delays->swapin_delay, __delayacct_blkio_end() 76 &current->delays->swapin_count); __delayacct_blkio_end() 78 delayacct_end(&current->delays->blkio_start, __delayacct_blkio_end() 79 &current->delays->blkio_delay, __delayacct_blkio_end() 80 &current->delays->blkio_count); __delayacct_blkio_end() 149 current->delays->freepages_start = ktime_get_ns(); __delayacct_freepages_start() 154 delayacct_end(&current->delays->freepages_start, __delayacct_freepages_end() 155 &current->delays->freepages_delay, __delayacct_freepages_end() 156 &current->delays->freepages_count); __delayacct_freepages_end()
|
H A D | user-return-notifier.c | 10 * Request a notification when the current cpu returns to userspace. Must be 16 set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY); user_return_notifier_register() 29 clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY); user_return_notifier_unregister()
|
H A D | freezer.c | 64 long save = current->state; __refrigerator() 66 pr_debug("%s entered refrigerator\n", current->comm); __refrigerator() 72 current->flags |= PF_FROZEN; __refrigerator() 73 if (!freezing(current) || __refrigerator() 75 current->flags &= ~PF_FROZEN; __refrigerator() 78 if (!(current->flags & PF_FROZEN)) __refrigerator() 84 pr_debug("%s left refrigerator\n", current->comm); __refrigerator() 160 * set_freezable - make %current freezable 162 * Mark %current freezable and enter refrigerator if necessary. 174 current->flags &= ~PF_NOFREEZE; set_freezable()
|
H A D | seccomp.c | 49 * with current->seccomp.filter, the most recently attached or inherited filter. 72 struct task_struct *task = current; populate_seccomp_data() 172 * @syscall: number of the current system call 182 lockless_dereference(current->seccomp.filter); seccomp_run_filters() 209 assert_spin_locked(¤t->sighand->siglock); seccomp_may_assign_mode() 211 if (current->seccomp.mode && current->seccomp.mode != seccomp_mode) seccomp_may_assign_mode() 258 BUG_ON(!mutex_is_locked(¤t->signal->cred_guard_mutex)); seccomp_can_sync_threads() 259 assert_spin_locked(¤t->sighand->siglock); seccomp_can_sync_threads() 262 caller = current; for_each_thread() 266 /* Skip current, since it is initiating the sync. */ for_each_thread() 288 * seccomp_sync_threads: sets all threads to use current's filter 299 BUG_ON(!mutex_is_locked(¤t->signal->cred_guard_mutex)); seccomp_sync_threads() 300 assert_spin_locked(¤t->sighand->siglock); seccomp_sync_threads() 303 caller = current; for_each_thread() 305 /* Skip current, since it needs no changes. */ for_each_thread() 313 * current's path will hold a reference. (This also for_each_thread() 363 if (!task_no_new_privs(current) && seccomp_prepare_filter() 416 * @filter: seccomp filter to add to the current process 418 * Caller must be holding current->sighand->siglock lock. 428 assert_spin_locked(¤t->sighand->siglock); seccomp_attach_filter() 432 for (walker = current->seccomp.filter; walker; walker = walker->prev) seccomp_attach_filter() 450 filter->prev = current->seccomp.filter; seccomp_attach_filter() 451 current->seccomp.filter = filter; seccomp_attach_filter() 503 info.si_call_addr = (void __user *)KSTK_EIP(current); seccomp_send_sigsys() 507 force_sig_info(SIGSYS, &info, current); seccomp_send_sigsys() 550 int mode = current->seccomp.mode; secure_computing_strict() 553 unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) secure_computing_strict() 597 syscall_set_return_value(current, task_pt_regs(current), __seccomp_phase1_filter() 603 syscall_rollback(current, task_pt_regs(current)); __seccomp_phase1_filter() 629 * seccomp_phase1() - run fast path seccomp checks on the current syscall 653 int mode = current->seccomp.mode; seccomp_phase1() 655 syscall_get_nr(current, task_pt_regs(current)); seccomp_phase1() 658 unlikely(current->ptrace & PT_SUSPEND_SECCOMP)) seccomp_phase1() 675 * seccomp_phase2() - finish slow path seccomp work for the current syscall 684 struct pt_regs *regs = task_pt_regs(current); seccomp_phase2() 690 audit_seccomp(syscall_get_nr(current, regs), 0, action); seccomp_phase2() 693 if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) { seccomp_phase2() 694 syscall_set_return_value(current, regs, seccomp_phase2() 707 if (fatal_signal_pending(current)) seccomp_phase2() 709 if (syscall_get_nr(current, regs) < 0) seccomp_phase2() 718 return current->seccomp.mode; prctl_get_seccomp() 724 * Once current->seccomp.mode is non-zero, it may not be changed. 733 spin_lock_irq(¤t->sighand->siglock); seccomp_set_mode_strict() 741 seccomp_assign_mode(current, seccomp_mode); seccomp_set_mode_strict() 745 spin_unlock_irq(¤t->sighand->siglock); seccomp_set_mode_strict() 760 * Once current->seccomp.mode is non-zero, it may not be changed. 
785 mutex_lock_killable(&current->signal->cred_guard_mutex)) seccomp_set_mode_filter() 788 spin_lock_irq(&current->sighand->siglock); seccomp_set_mode_filter() 799 seccomp_assign_mode(current, seccomp_mode); seccomp_set_mode_filter() 801 spin_unlock_irq(&current->sighand->siglock); seccomp_set_mode_filter() 803 mutex_unlock(&current->signal->cred_guard_mutex); seccomp_set_mode_filter() 839 * prctl_set_seccomp: configures current->seccomp.mode 882 current->seccomp.mode != SECCOMP_MODE_DISABLED) { seccomp_get_filter()
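The seccomp.c entry above shows the kernel side reached from prctl(): seccomp_prepare_filter() rejects callers that lack no_new_privs and CAP_SYS_ADMIN, and seccomp_attach_filter() links the new filter ahead of current->seccomp.filter. For illustration only (not code from this tree), a userspace program typically installs a filter like this; the allow-everything program and the install_filter() helper are invented for the sketch:

#include <stddef.h>
#include <sys/prctl.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

/* Trivial program: load the syscall number, then allow everything.
 * A real policy would compare 'nr' and return SECCOMP_RET_ERRNO or
 * SECCOMP_RET_KILL for disallowed calls. */
static struct sock_filter filter[] = {
        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
};

static struct sock_fprog prog = {
        .len = sizeof(filter) / sizeof(filter[0]),
        .filter = filter,
};

static int install_filter(void)
{
        /* Without CAP_SYS_ADMIN the kernel insists on no_new_privs first,
         * mirroring the task_no_new_privs(current) check in
         * seccomp_prepare_filter() above. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return -1;
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}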
|
H A D | acct.c | 42 * ->mmap_sem to walk the vma list of current->mm. Nasty, since it leaks 194 struct pid_namespace *ns = task_active_pid_ns(current); acct_on() 288 pin_kill(task_active_pid_ns(current)->bacct); SYSCALL_DEFINE1() 414 struct pacct_struct *pacct = ¤t->signal->pacct; fill_ac() 425 strlcpy(ac->ac_comm, current->comm, sizeof(ac->ac_comm)); fill_ac() 429 run_time -= current->group_leader->start_time; fill_ac() 453 spin_lock_irq(¤t->sighand->siglock); fill_ac() 454 tty = current->signal->tty; /* Safe as we hold the siglock */ fill_ac() 463 spin_unlock_irq(¤t->sighand->siglock); fill_ac() 478 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; do_acct_process() 479 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; do_acct_process() 503 ac.ac_pid = task_tgid_nr_ns(current, ns); do_acct_process() 505 ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), do_acct_process() 521 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; do_acct_process() 532 struct pacct_struct *pacct = ¤t->signal->pacct; acct_collect() 536 if (group_dead && current->mm) { acct_collect() 539 down_read(¤t->mm->mmap_sem); acct_collect() 540 vma = current->mm->mmap; acct_collect() 545 up_read(¤t->mm->mmap_sem); acct_collect() 548 spin_lock_irq(¤t->sighand->siglock); acct_collect() 551 if (thread_group_leader(current)) { acct_collect() 553 if (current->flags & PF_FORKNOEXEC) acct_collect() 556 if (current->flags & PF_SUPERPRIV) acct_collect() 558 if (current->flags & PF_DUMPCORE) acct_collect() 560 if (current->flags & PF_SIGNALED) acct_collect() 562 task_cputime(current, &utime, &stime); acct_collect() 565 pacct->ac_minflt += current->min_flt; acct_collect() 566 pacct->ac_majflt += current->maj_flt; acct_collect() 567 spin_unlock_irq(¤t->sighand->siglock); acct_collect() 592 * This loop is safe lockless, since current is still acct_process() 596 for (ns = task_active_pid_ns(current); ns != NULL; ns = ns->parent) { acct_process()
|
H A D | workqueue_internal.h | 58 * current_wq_worker - return struct worker if %current is a workqueue worker 62 if (current->flags & PF_WQ_WORKER) current_wq_worker() 63 return kthread_data(current); current_wq_worker()
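As a hypothetical caller (not part of the file above): code inside the workqueue core can use this helper to ask whether it is running in a worker thread, relying on PF_WQ_WORKER being set for such tasks; the pr_debug() message is invented.

        struct worker *worker = current_wq_worker();

        if (worker)
                pr_debug("called from a workqueue worker (PF_WQ_WORKER set)\n");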
|
H A D | fork.c | 252 WARN_ON(tsk == current); __put_task_struct() 614 if (current->mm) { mm_init() 615 mm->flags = current->mm->flags & MMF_INIT_MASK; mm_init() 616 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; mm_init() 673 return mm_init(mm, current); mm_alloc() 802 if (mm && mm != current->mm && mm_access() 849 * from the current process. 915 struct mm_struct *mm, *oldmm = current->mm; dup_mm() 967 oldmm = current->mm; copy_mm() 996 struct fs_struct *fs = current->fs; copy_fs() 1022 oldf = current->files; copy_files() 1044 struct io_context *ioc = current->io_context; copy_io() 1072 atomic_inc(¤t->sighand->count); copy_sighand() 1081 memcpy(sig->action, current->sighand->action, sizeof(sig->action)); copy_sighand() 1146 task_lock(current->group_leader); copy_signal() 1147 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); copy_signal() 1148 task_unlock(current->group_leader); copy_signal() 1155 sig->oom_score_adj = current->signal->oom_score_adj; copy_signal() 1156 sig->oom_score_adj_min = current->signal->oom_score_adj_min; copy_signal() 1158 sig->has_child_subreaper = current->signal->has_child_subreaper || copy_signal() 1159 current->signal->is_child_subreaper; copy_signal() 1175 assert_spin_locked(¤t->sighand->siglock); copy_seccomp() 1178 get_seccomp_filter(current); copy_seccomp() 1179 p->seccomp = current->seccomp; copy_seccomp() 1186 if (task_no_new_privs(current)) copy_seccomp() 1201 current->clear_child_tid = tidptr; SYSCALL_DEFINE1() 1203 return task_pid_vnr(current); SYSCALL_DEFINE1() 1283 current->signal->flags & SIGNAL_UNKILLABLE) copy_process() 1292 (task_active_pid_ns(current) != copy_process() 1293 current->nsproxy->pid_ns_for_children)) copy_process() 1302 p = dup_task_struct(current); copy_process() 1321 current->flags &= ~PF_NPROC_EXCEEDED; copy_process() 1361 p->default_timer_slack_ns = current->timer_slack_ns; copy_process() 1372 threadgroup_change_begin(current); copy_process() 1505 p->group_leader = current->group_leader; copy_process() 1506 p->tgid = current->tgid; copy_process() 1509 p->exit_signal = current->group_leader->exit_signal; copy_process() 1542 p->real_parent = current->real_parent; copy_process() 1543 p->parent_exec_id = current->parent_exec_id; copy_process() 1545 p->real_parent = current; copy_process() 1546 p->parent_exec_id = current->self_exec_id; copy_process() 1549 spin_lock(¤t->sighand->siglock); copy_process() 1562 * A fatal signal pending means that current will exit, so the new copy_process() 1566 if (signal_pending(current)) { copy_process() 1567 spin_unlock(¤t->sighand->siglock); copy_process() 1578 init_task_pid(p, PIDTYPE_PGID, task_pgrp(current)); copy_process() 1579 init_task_pid(p, PIDTYPE_SID, task_session(current)); copy_process() 1587 p->signal->tty = tty_kref_get(current->signal->tty); copy_process() 1594 current->signal->nr_threads++; copy_process() 1595 atomic_inc(¤t->signal->live); copy_process() 1596 atomic_inc(¤t->signal->sigcnt); copy_process() 1607 spin_unlock(¤t->sighand->siglock); copy_process() 1613 threadgroup_change_end(current); copy_process() 1654 threadgroup_change_end(current); copy_process() 1718 if (likely(!ptrace_event_enabled(current, trace))) _do_fork() 1732 trace_sched_process_fork(current, p); _do_fork() 1894 if (!thread_group_empty(current)) check_unshare_flags() 1898 if (atomic_read(¤t->sighand->count) > 1) check_unshare_flags() 1914 struct fs_struct *fs = current->fs; unshare_fs() 1935 struct files_struct *fd = current->files; unshare_fd() 1953 * constructed. 
Here we are modifying the current, active, 2016 exit_sem(current); SYSCALL_DEFINE1() 2020 exit_shm(current); SYSCALL_DEFINE1() 2021 shm_init_task(current); SYSCALL_DEFINE1() 2025 switch_task_namespaces(current, new_nsproxy); SYSCALL_DEFINE1() 2027 task_lock(current); SYSCALL_DEFINE1() 2030 fs = current->fs; SYSCALL_DEFINE1() 2032 current->fs = new_fs; SYSCALL_DEFINE1() 2041 fd = current->files; SYSCALL_DEFINE1() 2042 current->files = new_fd; SYSCALL_DEFINE1() 2046 task_unlock(current); SYSCALL_DEFINE1() 2071 * Helper to unshare the files of the current task. 2078 struct task_struct *task = current; unshare_files()
|
H A D | capability.c | 47 char name[sizeof(current->comm)]; warn_legacy_capability_use() 50 get_task_comm(name, current)); warn_legacy_capability_use() 71 char name[sizeof(current->comm)]; warn_deprecated_v2() 74 get_task_comm(name, current)); warn_deprecated_v2() 111 * The only thing that can change the capabilities of the current 112 * process is the current process. As such, we can't be in this code 122 if (pid && (pid != task_pid_vnr(current))) { cap_get_target_pid() 135 ret = security_capget(current, pEp, pIp, pPp); cap_get_target_pid() 212 * Set capabilities for the current process only. The ability to any other 239 /* may only affect current now */ SYSCALL_DEFINE2() 240 if (pid != 0 && pid != task_pid_vnr(current)) SYSCALL_DEFINE2() 365 * ns_capable - Determine if the current task has a superior capability in effect 369 * Return true if the current task has the given superior capability currently 383 current->flags |= PF_SUPERPRIV; ns_capable() 392 * capable - Determine if the current task has a superior capability in effect 395 * Return true if the current task has the given superior capability currently 438 * Return true if the current task has the given capability targeted at 440 * mapped into the current user namespace.
|
/linux-4.4.14/arch/arm64/kernel/ |
H A D | fpsimd.c | 48 * the id of the current CPU everytime the state is loaded onto a CPU. For (b), 60 * indicate whether or not the userland FPSIMD state of the current task is 62 * CPU currently contain the most recent userland FPSIMD state of the current 67 * contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu 73 * fpsimd_state.cpu field is set to the id of the current CPU, the current 102 * Raise a SIGFPE for the current process. 125 send_sig_info(SIGFPE, &info, current); do_fpsimd_exc() 131 * Save the current FPSIMD state to memory, but only if whatever is in fpsimd_thread_switch() 133 * 'current'. fpsimd_thread_switch() 135 if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE)) fpsimd_thread_switch() 136 fpsimd_save_state(&current->thread.fpsimd_state); fpsimd_thread_switch() 160 memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); fpsimd_flush_thread() 161 fpsimd_flush_task_state(current); fpsimd_flush_thread() 166 * Save the userland FPSIMD state of 'current' to memory, but only if the state 167 * currently held in the registers does in fact belong to 'current' 173 fpsimd_save_state(&current->thread.fpsimd_state); fpsimd_preserve_current_state() 178 * Load the userland FPSIMD state of 'current' from memory, but only if the 180 * state of 'current' 186 struct fpsimd_state *st = &current->thread.fpsimd_state; fpsimd_restore_current_state() 196 * Load an updated userland FPSIMD state for 'current' from memory and set the 198 * FPSIMD state of 'current' 205 struct fpsimd_state *st = &current->thread.fpsimd_state; fpsimd_update_current_state() 245 if (current->mm && kernel_neon_begin_partial() 247 fpsimd_save_state(&current->thread.fpsimd_state); kernel_neon_begin_partial() 273 if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE)) fpsimd_cpu_pm_notifier() 274 fpsimd_save_state(&current->thread.fpsimd_state); fpsimd_cpu_pm_notifier() 278 if (current->mm) fpsimd_cpu_pm_notifier()
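The comment block excerpted above describes a two-way link: the task's fpsimd_state.cpu field must name the CPU we are running on, and that CPU's fpsimd_last_state per-cpu pointer must point back at the task's state. A rough sketch of that test, built only from the names visible in the excerpt (an illustration, not code from the file):

static bool fpsimd_regs_belong_to_current(void)
{
        struct fpsimd_state *st = &current->thread.fpsimd_state;

        /* Both links must be intact; if either one has gone stale, the
         * registers no longer hold current's userland FPSIMD state and
         * TIF_FOREIGN_FPSTATE-style handling is needed. */
        return this_cpu_read(fpsimd_last_state) == st &&
               st->cpu == smp_processor_id();
}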
|
H A D | perf_regs.c | 58 regs_user->regs = task_pt_regs(current); perf_get_regs_user() 59 regs_user->abi = perf_reg_abi(current); perf_get_regs_user()
|
/linux-4.4.14/arch/score/kernel/ |
H A D | traps.c | 147 current->comm, current->pid, (unsigned long) current); show_registers() 178 force_sig(SIGBUS, current); do_adelinsn() 192 force_sig(SIGBUS, current); do_adedata() 198 force_sig(SIGFPE, current); do_pel() 204 force_sig(SIGFPE, current); do_cee() 210 force_sig(SIGFPE, current); do_cpe() 216 force_sig(SIGBUS, current); do_be() 229 force_sig_info(SIGFPE, &info, current); do_ov() 235 force_sig(SIGTRAP, current); do_tr() 243 read_tsk_long(current, epc, &epc_insn); do_ri() 244 if (current->thread.single_step == 1) { do_ri() 245 if ((epc == current->thread.addr1) || do_ri() 246 (epc == current->thread.addr2)) { do_ri() 247 user_disable_single_step(current); do_ri() 248 force_sig(SIGTRAP, current); do_ri() 255 force_sig(SIGTRAP, current); do_ri() 259 force_sig(SIGILL, current); do_ri() 266 force_sig(SIGILL, current); do_ccu() 339 current->active_mm = &init_mm; trap_init()
|
/linux-4.4.14/security/integrity/ |
H A D | integrity_audit.c | 41 ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno); integrity_audit_msg() 43 task_pid_nr(current), integrity_audit_msg() 45 from_kuid(&init_user_ns, audit_get_loginuid(current)), integrity_audit_msg() 46 audit_get_sessionid(current)); integrity_audit_msg() 53 audit_log_untrustedstring(ab, get_task_comm(name, current)); integrity_audit_msg()
|
/linux-4.4.14/arch/mips/kernel/ |
H A D | pm.c | 34 save_dsp(current); mips_cpu_save() 48 if (current->mm) mips_cpu_restore() 49 write_c0_entryhi(cpu_asid(cpu, current->mm)); mips_cpu_restore() 52 restore_dsp(current); mips_cpu_restore() 59 __restore_watch(current); mips_cpu_restore()
|
H A D | stacktrace.c | 71 save_stack_trace_tsk(current, trace); save_stack_trace() 82 if (tsk != current) { save_stack_trace_tsk() 88 save_context_stack(trace, tsk, regs, tsk == current); save_stack_trace_tsk()
|
H A D | traps.c | 145 task = current; show_backtrace() 202 if (task && task != current) { show_stack() 355 current->comm, current->pid, current_thread_info(), current, show_registers() 368 show_stacktrace(current, regs); show_registers() 383 if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr, die() 407 if (regs && kexec_should_crash(current)) die() 469 if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr, do_be() 474 force_sig(SIGBUS, current); do_be() 535 if (ll_task == NULL || ll_task == current) { simulate_ll() 540 ll_task = current; simulate_ll() 574 if (ll_bit == 0 || ll_task != current) { simulate_sc() 619 struct thread_info *ti = task_thread_info(current); simulate_rdhwr() 702 force_sig_info(SIGFPE, &info, current); do_ov() 735 force_sig_info(sig, &si, current); process_fpemu_return() 742 force_sig_info(sig, &si, current); process_fpemu_return() 748 down_read(¤t->mm->mmap_sem); process_fpemu_return() 749 if (find_vma(current->mm, (unsigned long)fault_addr)) process_fpemu_return() 753 up_read(¤t->mm->mmap_sem); process_fpemu_return() 754 force_sig_info(sig, &si, current); process_fpemu_return() 758 force_sig(sig, current); process_fpemu_return() 796 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, simulate_fp() 798 fcr31 = current->thread.fpu.fcr31; simulate_fp() 804 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; simulate_fp() 825 if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr, do_fpe() 850 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, do_fpe() 852 fcr31 = current->thread.fpu.fcr31; do_fpe() 858 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; do_fpe() 881 if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr, do_trap_or_bp() 886 if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr, do_trap_or_bp() 907 force_sig_info(SIGFPE, &info, current); do_trap_or_bp() 911 force_sig(SIGTRAP, current); do_trap_or_bp() 926 force_sig(SIGTRAP, current); do_trap_or_bp() 931 force_sig(SIGTRAP, current); do_trap_or_bp() 947 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; do_bp() 989 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) do_bp() 995 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) do_bp() 1001 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) do_bp() 1007 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) do_bp() 1023 force_sig(SIGSEGV, current); do_bp() 1040 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; do_tr() 1065 force_sig(SIGSEGV, current); do_tr() 1091 task_thread_info(current)->r2_emul_return = 1; do_ri() 1097 ¤t->thread.cp0_baduaddr, do_ri() 1099 task_thread_info(current)->r2_emul_return = 1; do_ri() 1107 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; do_ri() 1109 if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr, do_ri() 1152 force_sig(status, current); do_ri() 1168 ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { mt_ase_fp_affinity() 1174 if (cpumask_intersects(¤t->cpus_allowed, &mt_fpu_cpumask)) { mt_ase_fp_affinity() 1177 current->thread.user_cpus_allowed mt_ase_fp_affinity() 1178 = current->cpus_allowed; mt_ase_fp_affinity() 1179 cpumask_and(&tmask, ¤t->cpus_allowed, mt_ase_fp_affinity() 1181 set_cpus_allowed_ptr(current, &tmask); mt_ase_fp_affinity() 1210 force_sig(SIGILL, current); default_cu2_call() 1235 wait_on_atomic_t(¤t->mm->context.fp_mode_switching, enable_restore_fp_context() 1295 write_msa_csr(current->thread.fpu.msacsr); enable_restore_fp_context() 1317 _restore_fp(current); enable_restore_fp_context() 1327 
restore_msa(current); enable_restore_fp_context() 1332 current->thread.fpu.fcr31); enable_restore_fp_context() 1399 force_sig(status, current); do_cpu() 1418 force_sig(SIGILL, current); do_cpu() 1429 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, do_cpu() 1431 fcr31 = current->thread.fpu.fcr31; do_cpu() 1437 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X; do_cpu() 1458 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; do_msa_fpe() 1460 current->thread.trap_nr, SIGFPE) == NOTIFY_STOP) do_msa_fpe() 1468 force_sig(SIGFPE, current); do_msa_fpe() 1481 force_sig(SIGILL, current); do_msa() 1489 force_sig(SIGILL, current); do_msa() 1499 force_sig(SIGILL, current); do_mdmx() 1521 * If the current thread has the watch registers loaded, save do_watch() 1525 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { do_watch() 1528 force_sig(SIGTRAP, current); do_watch() 1599 force_sig(SIGILL, current); do_mt() 1608 force_sig(SIGILL, current); do_dsp() 2145 current->active_mm = &init_mm; per_cpu_trap_init() 2146 BUG_ON(current->mm); per_cpu_trap_init() 2147 enter_lazy_tlb(&init_mm, current); per_cpu_trap_init() 2308 * current list of targets for Linux/MIPS. trap_init()
|
H A D | signal.c | 71 struct mips_abi *abi = current->thread.abi; copy_fp_to_sigcontext() 80 __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0), copy_fp_to_sigcontext() 83 err |= __put_user(current->thread.fpu.fcr31, csr); copy_fp_to_sigcontext() 90 struct mips_abi *abi = current->thread.abi; copy_fp_from_sigcontext() 100 set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val); copy_fp_from_sigcontext() 102 err |= __get_user(current->thread.fpu.fcr31, csr); copy_fp_from_sigcontext() 112 struct mips_abi *abi = current->thread.abi; save_hw_fp_context() 121 struct mips_abi *abi = current->thread.abi; restore_hw_fp_context() 177 err = __put_user(current->thread.fpu.msacsr, &msa->csr); save_msa_extcontext() 180 val = get_fpr64(¤t->thread.fpu.fpr[i], 1); save_msa_extcontext() 226 current->thread.fpu.msacsr = csr; restore_msa_extcontext() 230 set_fpr64(¤t->thread.fpu.fpr[i], 1, val); restore_msa_extcontext() 298 struct mips_abi *abi = current->thread.abi; protected_save_fp_context() 351 struct mips_abi *abi = current->thread.abi; protected_restore_fp_context() 439 * will "inherit" current FPU state. setup_sigcontext() 455 * the extended context for the current task at the current time. extcontext_max_size() 494 current->restart_block.fn = do_no_restart_syscall; restore_sigcontext() 613 force_sig(sig, current); sys_sigreturn() 626 force_sig(SIGSEGV, current); sys_sigreturn() 648 force_sig(sig, current); sys_rt_sigreturn() 664 force_sig(SIGSEGV, current); sys_rt_sigreturn() 701 current->comm, current->pid, setup_frame() 748 current->comm, current->pid, setup_rt_frame() 772 struct mips_abi *abi = current->thread.abi; handle_signal() 773 void *vdso = current->mm->context.vdso; handle_signal() 827 regs->regs[2] = current->thread.abi->restart; do_signal()
|
/linux-4.4.14/arch/s390/kernel/ |
H A D | runtime_instr.c | 23 struct pt_regs *regs = task_pt_regs(current); disable_runtime_instr() 48 struct task_struct *task = current; exit_thread_runtime_instr() 74 if (!current->thread.ri_cb) { SYSCALL_DEFINE1() 79 cb = current->thread.ri_cb; SYSCALL_DEFINE1() 87 current->thread.ri_cb = cb; SYSCALL_DEFINE1()
|
H A D | uprobes.c | 36 regs->psw.addr = current->utask->xol_vaddr; arch_uprobe_pre_xol() 37 set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP); arch_uprobe_pre_xol() 38 update_cr_regs(current); arch_uprobe_pre_xol() 68 regs->psw.addr >= current->thread.per_user.start && check_per_event() 69 regs->psw.addr <= current->thread.per_user.end) check_per_event() 78 struct uprobe_task *utask = current->utask; arch_uprobe_post_xol() 80 clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP); arch_uprobe_post_xol() 81 update_cr_regs(current); arch_uprobe_post_xol() 98 if (check_per_event(current->thread.per_event.cause, arch_uprobe_post_xol() 99 current->thread.per_user.control, regs)) { arch_uprobe_post_xol() 101 current->thread.per_event.address = utask->vaddr; arch_uprobe_post_xol() 136 regs->psw.addr = current->utask->vaddr; arch_uprobe_abort_xol() 137 current->thread.per_event.address = current->utask->vaddr; arch_uprobe_abort_xol() 242 if (!(current->thread.per_user.control & PER_EVENT_STORE)) sim_stor_event() 244 if ((void *)current->thread.per_user.start > (addr + len)) sim_stor_event() 246 if ((void *)current->thread.per_user.end < addr) sim_stor_event() 248 current->thread.per_event.address = regs->psw.addr; sim_stor_event() 249 current->thread.per_event.cause = PER_EVENT_STORE >> 16; sim_stor_event()
|
H A D | processor.c | 41 current->active_mm = &init_mm; cpu_init() 42 BUG_ON(current->mm); cpu_init() 43 enter_lazy_tlb(&init_mm, current); cpu_init()
|
H A D | traps.c | 32 address = *(unsigned long *)(current->thread.trap_tdb + 24); get_trap_ip() 41 if ((task_pid_nr(current) > 1) && !show_unhandled_signals) report_user_fault() 43 if (!unhandled_signal(current, signr)) report_user_fault() 68 force_sig_info(si_signo, &info, current); do_report_trap() 101 if (!current->ptrace) do_per_trap() 107 (void __force __user *) current->thread.per_event.address; do_per_trap() 108 force_sig_info(SIGTRAP, &info, current); do_per_trap() 194 if (current->ptrace) { illegal_op() 199 force_sig_info(SIGTRAP, &info, current); illegal_op() 238 vic = (current->thread.fpu.fpc & 0xf00) >> 8; vector_exception() 269 if (current->thread.fpu.fpc & FPC_DXC_MASK) data_exception() 274 do_fp_trap(regs, current->thread.fpu.fpc); data_exception()
|
H A D | signal.c | 107 save_access_regs(current->thread.acrs); store_sigregs() 114 restore_access_regs(current->thread.acrs); load_sigregs() 128 memcpy(&user_sregs.regs.acrs, current->thread.acrs, save_sigregs() 130 fpregs_store(&user_sregs.fpregs, &current->thread.fpu); save_sigregs() 141 current->restart_block.fn = do_no_restart_syscall; restore_sigregs() 146 if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI)) restore_sigregs() 165 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, restore_sigregs() 166 sizeof(current->thread.acrs)); restore_sigregs() 168 fpregs_load(&user_sregs.fpregs, &current->thread.fpu); restore_sigregs() 184 vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); save_sigregs_ext() 188 current->thread.fpu.vxrs + __NUM_VXRS_LOW, save_sigregs_ext() 205 __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, restore_sigregs_ext() 210 *((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i]; restore_sigregs_ext() 217 struct pt_regs *regs = task_pt_regs(current); SYSCALL_DEFINE0() 233 force_sig(SIGSEGV, current); SYSCALL_DEFINE0() 239 struct pt_regs *regs = task_pt_regs(current); SYSCALL_DEFINE0() 257 force_sig(SIGSEGV, current); SYSCALL_DEFINE0() 279 sp = current->sas_ss_sp + current->sas_ss_size; get_sigframe() 362 regs->gprs[6] = task_thread_info(current)->last_break; setup_frame() 434 regs->gprs[5] = task_thread_info(current)->last_break; setup_rt_frame()
|
H A D | process.c | 71 * Free current thread data structures etc.. 109 * Save the floating-point or vector register state of the current arch_dup_task_struct() 114 dst->thread.fpu.fpc = current->thread.fpu.fpc; arch_dup_task_struct() 115 memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size); arch_dup_task_struct() 191 current->thread.fpu.fpc = 0; execve_tail() 201 fpregs->fpc = current->thread.fpu.fpc; dump_fpu() 205 current->thread.fpu.vxrs); dump_fpu() 207 memcpy(&fpregs->fprs, current->thread.fpu.fprs, dump_fpu() 219 if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p)) get_wchan() 239 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) arch_align_stack()
|
/linux-4.4.14/arch/mips/netlogic/xlp/ |
H A D | cop2-ex.c | 19 #include <asm/current.h> 100 KSTK_STATUS(current) |= ST0_CU2; nlm_cu2_call() 103 nlm_cop2_restore(&(current->thread.cp2)); nlm_cu2_call() 107 current->pid, current->comm); nlm_cu2_call()
|
/linux-4.4.14/fs/ |
H A D | signalfd.c | 65 poll_wait(file, &current->sighand->signalfd_wqh, wait); signalfd_poll() 67 spin_lock_irq(&current->sighand->siglock); signalfd_poll() 68 if (next_signal(&current->pending, &ctx->sigmask) || signalfd_poll() 69 next_signal(&current->signal->shared_pending, signalfd_poll() 72 spin_unlock_irq(&current->sighand->siglock); signalfd_poll() 163 DECLARE_WAITQUEUE(wait, current); signalfd_dequeue() 165 spin_lock_irq(&current->sighand->siglock); signalfd_dequeue() 166 ret = dequeue_signal(current, &ctx->sigmask, info); signalfd_dequeue() 173 spin_unlock_irq(&current->sighand->siglock); signalfd_dequeue() 177 add_wait_queue(&current->sighand->signalfd_wqh, &wait); signalfd_dequeue() 180 ret = dequeue_signal(current, &ctx->sigmask, info); signalfd_dequeue() 183 if (signal_pending(current)) { signalfd_dequeue() 187 spin_unlock_irq(&current->sighand->siglock); signalfd_dequeue() 189 spin_lock_irq(&current->sighand->siglock); signalfd_dequeue() 191 spin_unlock_irq(&current->sighand->siglock); signalfd_dequeue() 193 remove_wait_queue(&current->sighand->signalfd_wqh, &wait); signalfd_dequeue() 298 spin_lock_irq(&current->sighand->siglock); SYSCALL_DEFINE4() 300 spin_unlock_irq(&current->sighand->siglock); SYSCALL_DEFINE4() 302 wake_up(&current->sighand->signalfd_wqh); SYSCALL_DEFINE4()
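signalfd_dequeue() above is an instance of the classic wait-queue sleep loop. Stripped to its shape (the siglock handling around dequeue_signal() and schedule(), the siginfo copy-out and the final return are omitted; ctx, info and ret are the names used in the excerpt):

        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&current->sighand->signalfd_wqh, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                ret = dequeue_signal(current, &ctx->sigmask, info);
                if (ret != 0)
                        break;                  /* got a signal, or an error */
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;     /* interrupted: let the caller restart */
                        break;
                }
                schedule();
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&current->sighand->signalfd_wqh, &wait);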
|
H A D | binfmt_aout.c | 38 * Routine writes a core dump image in the current directory. 42 * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable" 64 strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm)); aout_core_dump() 175 current->mm->arg_start = (unsigned long) p; create_aout_tables() 184 current->mm->arg_end = current->mm->env_start = (unsigned long) p; create_aout_tables() 193 current->mm->env_end = (unsigned long) p; create_aout_tables() 251 current->mm->end_code = ex.a_text + load_aout_binary() 252 (current->mm->start_code = N_TXTADDR(ex)); load_aout_binary() 253 current->mm->end_data = ex.a_data + load_aout_binary() 254 (current->mm->start_data = N_DATADDR(ex)); load_aout_binary() 255 current->mm->brk = ex.a_bss + load_aout_binary() 256 (current->mm->start_brk = N_BSSADDR(ex)); load_aout_binary() 324 retval = set_brk(current->mm->start_brk, current->mm->brk); load_aout_binary() 328 current->mm->start_stack = load_aout_binary() 333 start_thread(regs, ex.a_entry, current->mm->start_stack); load_aout_binary()
|
H A D | drop_caches.c | 63 current->comm, task_pid_nr(current), drop_caches_sysctl_handler()
|
H A D | binfmt_elf_fdpic.c | 202 kdebug("____ LOAD %d ____", current->pid); load_elf_fdpic_binary() 355 current->personality |= READ_IMPLIES_EXEC; load_elf_fdpic_binary() 361 current->mm->start_code = 0; load_elf_fdpic_binary() 362 current->mm->end_code = 0; load_elf_fdpic_binary() 363 current->mm->start_stack = 0; load_elf_fdpic_binary() 364 current->mm->start_data = 0; load_elf_fdpic_binary() 365 current->mm->end_data = 0; load_elf_fdpic_binary() 366 current->mm->context.exec_fdpic_loadmap = 0; load_elf_fdpic_binary() 367 current->mm->context.interp_fdpic_loadmap = 0; load_elf_fdpic_binary() 372 ¤t->mm->start_stack, load_elf_fdpic_binary() 373 ¤t->mm->start_brk); load_elf_fdpic_binary() 375 retval = setup_arg_pages(bprm, current->mm->start_stack, load_elf_fdpic_binary() 382 retval = elf_fdpic_map_file(&exec_params, bprm->file, current->mm, load_elf_fdpic_binary() 389 current->mm, "interpreter"); load_elf_fdpic_binary() 401 if (!current->mm->start_brk) load_elf_fdpic_binary() 402 current->mm->start_brk = current->mm->end_data; load_elf_fdpic_binary() 404 current->mm->brk = current->mm->start_brk = load_elf_fdpic_binary() 405 PAGE_ALIGN(current->mm->start_brk); load_elf_fdpic_binary() 418 current->mm->start_brk = vm_mmap(NULL, 0, stack_size, stack_prot, load_elf_fdpic_binary() 423 if (IS_ERR_VALUE(current->mm->start_brk)) { load_elf_fdpic_binary() 424 retval = current->mm->start_brk; load_elf_fdpic_binary() 425 current->mm->start_brk = 0; load_elf_fdpic_binary() 429 current->mm->brk = current->mm->start_brk; load_elf_fdpic_binary() 430 current->mm->context.end_brk = current->mm->start_brk; load_elf_fdpic_binary() 431 current->mm->start_stack = current->mm->start_brk + stack_size; load_elf_fdpic_binary() 435 if (create_elf_fdpic_tables(bprm, current->mm, load_elf_fdpic_binary() 439 kdebug("- start_code %lx", current->mm->start_code); load_elf_fdpic_binary() 440 kdebug("- end_code %lx", current->mm->end_code); load_elf_fdpic_binary() 441 kdebug("- start_data %lx", current->mm->start_data); load_elf_fdpic_binary() 442 kdebug("- end_data %lx", current->mm->end_data); load_elf_fdpic_binary() 443 kdebug("- start_brk %lx", current->mm->start_brk); load_elf_fdpic_binary() 444 kdebug("- brk %lx", current->mm->brk); load_elf_fdpic_binary() 445 kdebug("- start_stack %lx", current->mm->start_stack); load_elf_fdpic_binary() 461 start_thread(regs, entryaddr, current->mm->start_stack); load_elf_fdpic_binary() 565 current->mm->context.exec_fdpic_loadmap = (unsigned long) sp; create_elf_fdpic_tables() 578 current->mm->context.interp_fdpic_loadmap = (unsigned long) sp; create_elf_fdpic_tables() 679 current->mm->arg_start = bprm->p; create_elf_fdpic_tables() 681 current->mm->arg_start = current->mm->start_stack - create_elf_fdpic_tables() 685 p = (char __user *) current->mm->arg_start; create_elf_fdpic_tables() 694 current->mm->arg_end = (unsigned long) p; create_elf_fdpic_tables() 697 current->mm->env_start = (unsigned long) p; create_elf_fdpic_tables() 706 current->mm->env_end = (unsigned long) p; create_elf_fdpic_tables() 1521 for (vma = current->mm->mmap; vma; vma = vma->vm_next) { elf_fdpic_dump_segments() 1557 for (vma = current->mm->mmap; vma; vma = vma->vm_next) elf_core_vma_data_size() 1603 * or the mmap / vma pages are now blocked in do_exit on current elf_fdpic_core_dump() 1633 for (ct = current->mm->core_state->dumper.next; elf_fdpic_core_dump() 1652 /* now collect the dump for the current */ elf_fdpic_core_dump() 1653 fill_prstatus(prstatus, current, cprm->siginfo->si_signo); 
elf_fdpic_core_dump() 1656 segs = current->mm->map_count; elf_fdpic_core_dump() 1677 fill_psinfo(psinfo, current->group_leader, current->mm); elf_fdpic_core_dump() 1682 auxv = (elf_addr_t *) current->mm->saved_auxv; elf_fdpic_core_dump() 1693 elf_core_copy_task_fpregs(current, cprm->regs, fpu))) elf_fdpic_core_dump() 1697 if (elf_core_copy_task_xfpregs(current, xfpu)) elf_fdpic_core_dump() 1748 for (vma = current->mm->mmap; vma; vma = vma->vm_next) { elf_fdpic_core_dump()
|
H A D | fs_struct.c | 133 struct fs_struct *fs = current->fs; unshare_fs_struct() 140 task_lock(current); unshare_fs_struct() 143 current->fs = new_fs; unshare_fs_struct() 145 task_unlock(current); unshare_fs_struct() 156 return current->fs->umask; current_umask()
|
H A D | coredump.c | 136 exe_file = get_mm_exe_file(current->mm); cn_print_exe_file() 138 return cn_esc_printf(cn, "%s (path unknown)", current->comm); cn_print_exe_file() 200 task_tgid_vnr(current)); format_corename() 205 task_tgid_nr(current)); format_corename() 209 task_pid_vnr(current)); format_corename() 213 task_pid_nr(current)); format_corename() 252 err = cn_esc_printf(cn, "%s", current->comm); format_corename() 279 err = cn_printf(cn, ".%d", task_tgid_vnr(current)); format_corename() 298 if (t != current && t->mm) { for_each_thread() 347 * It does list_replace_rcu(&leader->tasks, ¤t->tasks), zap_threads() 386 struct task_struct *tsk = current; coredump_wait() 423 spin_lock_irq(¤t->sighand->siglock); coredump_finish() 424 if (core_dumped && !__fatal_signal_pending(current)) coredump_finish() 425 current->signal->group_exit_code |= 0x80; coredump_finish() 426 current->signal->group_exit_task = NULL; coredump_finish() 427 current->signal->flags = SIGNAL_GROUP_EXIT; coredump_finish() 428 spin_unlock_irq(¤t->sighand->siglock); coredump_finish() 454 return signal_pending(current); dump_interrupted() 504 current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; umh_pipe_setup() 513 struct mm_struct *mm = current->mm; do_coredump() 596 task_tgid_vnr(current), current->comm); do_coredump() 605 task_tgid_vnr(current), current->comm); do_coredump() 642 task_tgid_vnr(current), current->comm); do_coredump() 676 * their current->fs->root to point to arbitrary do_coredump() 680 * current->fs->root must not be used. Instead, use the do_coredump()
|
H A D | exec.c | 13 * "current->executable", and page faults do the actual loading. Clean. 19 * current->executable is only used by the procfs. This allows a dispatch 172 * use a lot of memory, account these pages in current->mm temporary 178 struct mm_struct *mm = current->mm; acct_arg_size() 201 ret = get_user_pages(current, bprm->mm, pos, get_arg_page() 226 rlim = current->signal->rlim; get_arg_page() 447 if (fatal_signal_pending(current)) count() 494 if (fatal_signal_pending(current)) { copy_strings() 653 struct mm_struct *mm = current->mm; setup_arg_pages() 747 current->mm->start_stack = bprm->p; setup_arg_pages() 849 tsk = current; exec_mmap() 850 old_mm = current->mm; exec_mmap() 888 * This function makes sure the current process has its own signal table, 1090 retval = de_thread(current); flush_old_exec() 1112 current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | flush_old_exec() 1115 current->personality &= ~bprm->per_clear; flush_old_exec() 1133 arch_pick_mmap_layout(current->mm); setup_new_exec() 1136 current->sas_ss_sp = current->sas_ss_size = 0; setup_new_exec() 1139 set_dumpable(current->mm, SUID_DUMP_USER); setup_new_exec() 1141 set_dumpable(current->mm, suid_dumpable); setup_new_exec() 1144 __set_task_comm(current, kbasename(bprm->filename), true); setup_new_exec() 1150 current->mm->task_size = TASK_SIZE; setup_new_exec() 1155 current->pdeath_signal = 0; setup_new_exec() 1159 set_dumpable(current->mm, suid_dumpable); setup_new_exec() 1164 current->self_exec_id++; setup_new_exec() 1165 flush_signal_handlers(current, 0); setup_new_exec() 1166 do_close_on_exec(current->files); setup_new_exec() 1178 if (mutex_lock_interruptible(¤t->signal->cred_guard_mutex)) prepare_bprm_creds() 1185 mutex_unlock(¤t->signal->cred_guard_mutex); prepare_bprm_creds() 1193 mutex_unlock(¤t->signal->cred_guard_mutex); free_bprm() 1234 if (get_dumpable(current->mm) != SUID_DUMP_USER) install_exec_creds() 1235 perf_event_exit_task(current); install_exec_creds() 1242 mutex_unlock(¤t->signal->cred_guard_mutex); install_exec_creds() 1253 struct task_struct *p = current, *t; check_unsafe_exec() 1267 if (task_no_new_privs(current)) check_unsafe_exec() 1301 if (task_no_new_privs(current)) bprm_fill_uid() 1435 force_sigsegv(SIGSEGV, current); search_binary_handler() 1465 old_pid = current->pid; exec_binprm() 1467 old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); exec_binprm() 1473 trace_sched_process_exec(current, old_pid, bprm); exec_binprm() 1475 proc_exec_connector(current); exec_binprm() 1504 if ((current->flags & PF_NPROC_EXCEEDED) && do_execveat_common() 1512 current->flags &= ~PF_NPROC_EXCEEDED; do_execveat_common() 1528 current->in_execve = 1; do_execveat_common() 1553 * current->files (due to unshare_files above). do_execveat_common() 1555 if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt))) do_execveat_common() 1595 current->fs->in_exec = 0; do_execveat_common() 1596 current->in_execve = 0; do_execveat_common() 1597 acct_update_integrals(current); do_execveat_common() 1598 task_numa_free(current); do_execveat_common() 1613 current->fs->in_exec = 0; do_execveat_common() 1614 current->in_execve = 0; do_execveat_common() 1683 struct mm_struct *mm = current->mm; set_binfmt()
|
/linux-4.4.14/arch/x86/um/ |
H A D | sysrq_64.c | 11 #include <asm/current.h> 19 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current), show_regs() 20 current->comm, print_tainted(), init_utsname()->release); show_regs()
|
H A D | syscalls_64.c | 35 ret = restore_registers(pid, &current->thread.regs.regs); arch_prctl() 58 current->thread.arch.fs = (unsigned long) ptr; arch_prctl() 59 ret = save_registers(pid, &current->thread.regs.regs); arch_prctl() 62 ret = save_registers(pid, &current->thread.regs.regs); arch_prctl() 77 return arch_prctl(current, code, (unsigned long __user *) addr); sys_arch_prctl()
|
/linux-4.4.14/lib/ |
H A D | smp_processor_id.c | 25 if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu))) check_preemption_disabled() 43 what1, what2, preempt_count() - 1, current->comm, current->pid); check_preemption_disabled()
|
/linux-4.4.14/fs/reiserfs/ |
H A D | lock.c | 25 if (sb_i->lock_owner != current) { reiserfs_write_lock() 27 sb_i->lock_owner = current; reiserfs_write_lock() 30 /* No need to protect it, only the current task touches it */ reiserfs_write_lock() 43 BUG_ON(sb_i->lock_owner != current); reiserfs_write_unlock() 57 if (sb_i->lock_owner != current) reiserfs_write_unlock_nested() 78 sb_i->lock_owner = current; reiserfs_write_lock_nested()
|
/linux-4.4.14/include/linux/mfd/ |
H A D | max14577.h | 71 /* current control GPIOs */ 75 /* current control GPIO control function */ 85 * Valid limits of current for max14577 and max77836 chargers. 90 /* Minimal current, set in CHGCTRL4/MBCICHWRCL, uA */ 93 * Minimal current when high setting is active, 99 /* Maximum current of high setting, uA */
|
H A D | abx500.h | 77 * @high_curr_time: Time current has to be high to go to recovery 79 * @accu_high_curr: FG accumulation time in high current mode 80 * @high_curr_threshold: High current threshold, in mA 124 * @maxi_chg_curr: Maximum charger current allowed 125 * @maxi_wait_cycles: cycles to wait before setting charger current 126 * @charger_curr_step delta between two charger current settings (mA) 143 * @termination_curr battery charging termination current in mA 147 * @normal_cur_lvl: charger current in normal state in mA 149 * @maint_a_cur_lvl: charger current in maintenance A state in mA 152 * @maint_b_cur_lvl: charger current in maintenance B state in mA 155 * @low_high_cur_lvl: charger current in temp low/high state in mA 212 * @usb_curr_max: maximum allowed USB charger current in mA 214 * @ac_curr_max: maximum allowed AC charger current in mA 235 * @bkup_bat_i current which we charge the backup battery with 251 * @chg_output_curr charger output current level map 252 * @chg_input_curr charger input current level map
|
/linux-4.4.14/drivers/staging/sm750fb/ |
H A D | ddk750_power.h | 23 * This function sets the current power mode 28 * This function sets current gate
|
H A D | ddk750_chip.h | 45 * 0 = keep the current clock setting 50 * 0 = keep the current clock setting 55 * 0 = keep the current clock setting
|
/linux-4.4.14/arch/um/include/asm/ |
H A D | stacktrace.h | 20 if (!task || task == current) get_frame_pointer() 35 if (!task || task == current) get_stack_pointer()
|
/linux-4.4.14/arch/x86/math-emu/ |
H A D | fpu_system.h | 29 mutex_lock(&current->mm->context.lock); FPU_get_ldt_descriptor() 30 if (current->mm->context.ldt && seg < current->mm->context.ldt->size) FPU_get_ldt_descriptor() 31 ret = current->mm->context.ldt->entries[seg]; FPU_get_ldt_descriptor() 32 mutex_unlock(&current->mm->context.lock); FPU_get_ldt_descriptor() 49 #define I387 (&current->thread.fpu.state) 62 /* nz if ip_offset and cs_selector are not to be set for the current 67 /* Number of bytes of data which can be legally accessed by the current
|
/linux-4.4.14/arch/metag/include/asm/ |
H A D | cachepart.h | 23 * Returns the size of the current thread's global dcache partition. 30 * Returns the size of the current thread's global icache partition.
|
H A D | tlbflush.h | 12 * - flush_tlb() flushes the current mm struct TLBs 27 /* flush TLB entries for just the current hardware thread */ __flush_tlb() 48 if (mm == current->active_mm) flush_tlb_mm()
|
/linux-4.4.14/arch/powerpc/math-emu/ |
H A D | math.c | 331 op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu() 332 op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); do_mathemu() 333 op2 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); do_mathemu() 337 op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu() 338 op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); do_mathemu() 339 op2 = (void *)¤t->thread.TS_FPR((insn >> 6) & 0x1f); do_mathemu() 343 op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu() 344 op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); do_mathemu() 345 op2 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); do_mathemu() 346 op3 = (void *)¤t->thread.TS_FPR((insn >> 6) & 0x1f); do_mathemu() 352 op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu() 362 op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu() 367 op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu() 371 op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu() 372 op1 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); do_mathemu() 376 op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu() 377 op1 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); do_mathemu() 382 op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu() 391 op0 = (void *)¤t->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu() 399 op2 = (void *)¤t->thread.TS_FPR((insn >> 16) & 0x1f); do_mathemu() 400 op3 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); do_mathemu() 420 op1 = (void *)¤t->thread.TS_FPR((insn >> 11) & 0x1f); do_mathemu() 432 flush_fp_to_thread(current); do_mathemu()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
H A D | user_pages.c | 69 * Call with current->mm->mmap_sem held. 86 ret = get_user_pages(current, current->mm, __hfi1_get_user_pages() 94 current->mm->pinned_vm += num_pages; __hfi1_get_user_pages() 136 down_write(&current->mm->mmap_sem); hfi1_get_user_pages() 140 up_write(&current->mm->mmap_sem); hfi1_get_user_pages() 147 if (current->mm) /* during close after signal, mm can be NULL */ hfi1_release_user_pages() 148 down_write(&current->mm->mmap_sem); hfi1_release_user_pages() 152 if (current->mm) { hfi1_release_user_pages() 153 current->mm->pinned_vm -= num_pages; hfi1_release_user_pages() 154 up_write(&current->mm->mmap_sem); hfi1_release_user_pages()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
H A D | qib_user_pages.c | 52 * Call with current->mm->mmap_sem held. 69 ret = get_user_pages(current, current->mm, __qib_get_user_pages() 77 current->mm->pinned_vm += num_pages; __qib_get_user_pages() 137 down_write(&current->mm->mmap_sem); qib_get_user_pages() 141 up_write(&current->mm->mmap_sem); qib_get_user_pages() 148 if (current->mm) /* during close after signal, mm can be NULL */ qib_release_user_pages() 149 down_write(&current->mm->mmap_sem); qib_release_user_pages() 153 if (current->mm) { qib_release_user_pages() 154 current->mm->pinned_vm -= num_pages; qib_release_user_pages() 155 up_write(&current->mm->mmap_sem); qib_release_user_pages()
|
/linux-4.4.14/arch/x86/um/asm/ |
H A D | vm-flags.h | 14 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
|
/linux-4.4.14/arch/alpha/lib/ |
H A D | dbg_current.S | 5 * Trap if we find current not correct.
|
/linux-4.4.14/fs/proc/ |
H A D | thread_self.c | 13 pid_t tgid = task_tgid_nr_ns(current, ns); proc_thread_self_readlink() 14 pid_t pid = task_pid_nr_ns(current, ns); proc_thread_self_readlink() 25 pid_t tgid = task_tgid_nr_ns(current, ns); proc_thread_self_follow_link() 26 pid_t pid = task_pid_nr_ns(current, ns); proc_thread_self_follow_link()
|
H A D | task_nommu.c | 54 if (current->fs && current->fs->users > 1) task_mem() 55 sbytes += kobjsize(current->fs); task_mem() 57 bytes += kobjsize(current->fs); task_mem() 59 if (current->files && atomic_read(&current->files->count) > 1) task_mem() 60 sbytes += kobjsize(current->files); task_mem() 62 bytes += kobjsize(current->files); task_mem() 64 if (current->sighand && atomic_read(&current->sighand->count) > 1) task_mem() 65 sbytes += kobjsize(current->sighand); task_mem() 67 bytes += kobjsize(current->sighand); task_mem() 69 bytes += kobjsize(current); /* includes kernel stack */ task_mem()
|
/linux-4.4.14/include/linux/sunrpc/ |
H A D | types.h | 20 #define signalled() (signal_pending(current))
|
/linux-4.4.14/arch/mips/cavium-octeon/crypto/ |
H A D | octeon-crypto.c | 22 * @state: Pointer to state structure to store current COP2 state in. 35 if (KSTK_STATUS(current) & ST0_CU2) { octeon_crypto_enable() 36 octeon_cop2_save(&(current->thread.cp2)); octeon_crypto_enable() 37 KSTK_STATUS(current) &= ~ST0_CU2; octeon_crypto_enable()
|
/linux-4.4.14/arch/blackfin/include/asm/ |
H A D | page.h | 15 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
|
/linux-4.4.14/arch/cris/arch-v10/kernel/ |
H A D | traps.c | 20 * It's possible to use either the USP register or current->thread.usp. show_registers() 21 * USP might not correspond to the current process for all cases this show_registers() 22 * function is called, and current->thread.usp isn't up to date for the show_registers() 23 * current process. Experience shows that using USP is the way to go. show_registers() 45 current->comm, current->pid, (unsigned long)current); show_registers()
|
/linux-4.4.14/arch/sh/kernel/ |
H A D | traps.c | 31 printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm, die() 32 task_pid_nr(current), task_stack_page(current) + 1); die() 36 (unsigned long)task_stack_page(current)); die() 45 if (kexec_should_crash(current)) die() 138 force_sig(SIGTRAP, current); BUILD_TRAP_HANDLER() 164 force_sig(SIGTRAP, current); BUILD_TRAP_HANDLER()
|
H A D | sys_sh.c | 71 down_read(&current->mm->mmap_sem); sys_cacheflush() 72 vma = find_vma (current->mm, addr); sys_cacheflush() 74 up_read(&current->mm->mmap_sem); sys_cacheflush() 93 up_read(&current->mm->mmap_sem); sys_cacheflush()
|
H A D | signal_32.c | 44 * current gcc compilers (<4.3) do not generate unwind info on 78 struct task_struct *tsk = current; restore_sigcontext_fpu() 91 struct task_struct *tsk = current; save_sigcontext_fpu() 103 attempted FPU operation by the 'current' process. save_sigcontext_fpu() 135 struct task_struct *tsk = current; restore_sigcontext() 159 current->restart_block.fn = do_no_restart_syscall; sys_sigreturn() 177 force_sig(SIGSEGV, current); sys_sigreturn() 189 current->restart_block.fn = do_no_restart_syscall; sys_rt_sigreturn() 208 force_sig(SIGSEGV, current); sys_rt_sigreturn() 254 sp = current->sas_ss_sp + current->sas_ss_size; get_sigframe() 287 } else if (likely(current->mm->context.vdso)) { setup_frame() 313 if (current->personality & FDPIC_FUNCPTRS) { setup_frame() 326 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); setup_frame() 357 } else if (likely(current->mm->context.vdso)) { setup_rt_frame() 383 if (current->personality & FDPIC_FUNCPTRS) { setup_rt_frame() 396 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); setup_rt_frame()
|
/linux-4.4.14/arch/mips/include/asm/ |
H A D | fpu.h | 23 #include <asm/current.h> 63 /* just enable the FPU in its current mode */ __enable_fpu() 135 KSTK_STATUS(current) |= ST0_CU1; __own_fpu() 137 KSTK_STATUS(current) |= ST0_FR; __own_fpu() 139 KSTK_STATUS(current) &= ~ST0_FR; __own_fpu() 152 _restore_fp(current); own_fpu_inatomic() 190 lose_fpu_inatomic(save, current); lose_fpu() 196 unsigned int fcr31 = current->thread.fpu.fcr31; init_fpu() 246 if (tsk == current) { get_fpu_regs() 249 _save_fp(current); get_fpu_regs()
|
H A D | stackprotector.h | 36 current->stack_canary = canary; boot_init_stack_canary() 37 __stack_chk_guard = current->stack_canary; boot_init_stack_canary()
|
/linux-4.4.14/fs/quota/ |
H A D | quotaio_v2.h | 12 * Definitions of magics and versions of current quota files 41 __le32 dqb_curinodes; /* current # allocated inodes */ 44 __le64 dqb_curspace; /* current space occupied (in bytes) */ 54 __le64 dqb_curinodes; /* current # allocated inodes */ 57 __le64 dqb_curspace; /* current space occupied (in bytes) */
|
H A D | quotaio_v1.h | 23 __u32 dqb_curblocks; /* current block count */ 26 __u32 dqb_curinodes; /* current # allocated inodes */
|
/linux-4.4.14/include/linux/mfd/wm8350/ |
H A D | supply.h | 114 int eoc_mA; /* end of charge current (mA) */ 116 int fast_limit_mA; /* fast charge current limit */ 117 int fast_limit_USB_mA; /* USB fast charge current limit */ 120 int trickle_charge_mA; /* trickle charge current */ 121 int trickle_charge_USB_mA; /* USB trickle charge current */
|
/linux-4.4.14/tools/testing/selftests/powerpc/pmu/ |
H A D | count_instructions.c | 76 u64 current, overhead; determine_overhead() local 84 current = events[0].result.value; determine_overhead() 85 if (current < overhead) { determine_overhead() 86 printf("Replacing overhead %llu with %llu\n", overhead, current); determine_overhead() 87 overhead = current; determine_overhead()
|
/linux-4.4.14/arch/powerpc/mm/ |
H A D | mmu_context_iommu.c | 56 current->pid, mm_iommu_adjust_locked_vm() 68 if (!current || !current->mm) mm_iommu_preregistered() 71 return !list_empty(&current->mm->context.iommu_group_mem_list); mm_iommu_preregistered() 82 if (!current || !current->mm) mm_iommu_get() 87 list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list, mm_iommu_get() 105 ret = mm_iommu_adjust_locked_vm(current->mm, entries, true); mm_iommu_get() 145 list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list); mm_iommu_get() 149 mm_iommu_adjust_locked_vm(current->mm, locked_entries, false); mm_iommu_get() 194 mm_iommu_adjust_locked_vm(current->mm, mem->entries, false); mm_iommu_release() 202 if (!current || !current->mm) mm_iommu_put() 240 &current->mm->context.iommu_group_mem_list, mm_iommu_lookup() 260 &current->mm->context.iommu_group_mem_list, mm_iommu_find()
|
/linux-4.4.14/drivers/cpufreq/ |
H A D | integrator-cpufreq.c | 89 cpus_allowed = current->cpus_allowed; integrator_set_target() 95 set_cpus_allowed_ptr(current, cpumask_of(cpu)); integrator_set_target() 98 /* get current setting */ integrator_set_target() 120 set_cpus_allowed_ptr(current, &cpus_allowed); integrator_set_target() 143 set_cpus_allowed_ptr(current, &cpus_allowed); integrator_set_target() 157 cpus_allowed = current->cpus_allowed; integrator_get() 159 set_cpus_allowed_ptr(current, cpumask_of(cpu)); integrator_get() 172 current_freq = icst_hz(&cclk_params, vco) / 1000; /* current freq */ integrator_get() 174 set_cpus_allowed_ptr(current, &cpus_allowed); integrator_get()
|
/linux-4.4.14/include/linux/power/ |
H A D | smb347-charger.h | 45 * @max_charge_current: maximum current (in uA) the battery can be charged 47 * @pre_charge_current: current (in uA) to use in pre-charging phase 48 * @termination_current: current (in uA) used to determine when the 52 * @mains_current_limit: maximum input current drawn from AC/DC input (in uA) 53 * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB 56 * current [%100 - %130] (in degree C) 68 * @charge_current_compensation: current (in uA) for charging compensation 69 * current when temperature hits soft limits 89 * If zero value is given in any of the current and voltage values, the
|
H A D | bq2415x_charger.h | 31 * termination current. It it is less or equal to zero, configuring charge 32 * and termination current will not be possible. 36 * POWER_SUPPLY_PROP_CURRENT_MAX when current changed. 39 /* Supported modes with maximal current limit */
|
/linux-4.4.14/arch/mn10300/kernel/ |
H A D | fpu.c | 32 * be meant for a process other than the current one 36 struct task_struct *tsk = current; fpu_exception() 76 struct task_struct *tsk = current; fpu_setup_sigcontext() 81 /* transfer the current FPU state to memory and cause fpu_init() to be fpu_setup_sigcontext() 82 * triggered by the next attempted FPU operation by the current fpu_setup_sigcontext() 103 /* we no longer have a valid current FPU state */ fpu_setup_sigcontext() 138 /* we no longer have a valid current FPU state */ fpu_kill_state() 147 struct task_struct *tsk = current; fpu_restore_sigcontext() 165 struct task_struct *tsk = current; dump_fpu()
|
/linux-4.4.14/arch/x86/ia32/ |
H A D | ia32_aout.c | 46 struct perf_event *bp = current->thread.ptrace_bps[n]; get_dr() 62 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; dump_thread32() 64 (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; dump_thread32() 70 dump->u_debugreg[6] = current->thread.debugreg6; dump_thread32() 71 dump->u_debugreg[7] = current->thread.ptrace_dr7; dump_thread32() 87 dump->regs.ds = current->thread.ds; dump_thread32() 88 dump->regs.es = current->thread.es; dump_thread32() 140 * Routine writes a core dump image in the current directory. 144 * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable" 159 strncpy(dump.u_comm, current->comm, sizeof(current->comm)); aout_core_dump() 232 current->mm->arg_start = (unsigned long) p; create_aout_tables() 242 current->mm->arg_end = current->mm->env_start = (unsigned long) p; create_aout_tables() 252 current->mm->env_end = (unsigned long) p; create_aout_tables() 303 current->mm->end_code = ex.a_text + load_aout_binary() 304 (current->mm->start_code = N_TXTADDR(ex)); load_aout_binary() 305 current->mm->end_data = ex.a_data + load_aout_binary() 306 (current->mm->start_data = N_DATADDR(ex)); load_aout_binary() 307 current->mm->brk = ex.a_bss + load_aout_binary() 308 (current->mm->start_brk = N_BSSADDR(ex)); load_aout_binary() 378 set_brk(current->mm->start_brk, current->mm->brk); load_aout_binary() 380 current->mm->start_stack = load_aout_binary() 388 (regs)->sp = current->mm->start_stack; load_aout_binary()
|
/linux-4.4.14/arch/xtensa/mm/ |
H A D | fault.c | 41 struct mm_struct *mm = current->mm; do_page_fault() 72 printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid, do_page_fault() 115 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) do_page_fault() 129 current->maj_flt++; do_page_fault() 131 current->min_flt++; do_page_fault() 160 current->thread.bad_vaddr = address; do_page_fault() 161 current->thread.error_code = is_write; do_page_fault() 166 force_sig_info(SIGSEGV, &info, current); do_page_fault() 190 current->thread.bad_vaddr = address; do_page_fault() 195 force_sig_info(SIGBUS, &info, current); do_page_fault() 207 struct mm_struct *act_mm = current->active_mm; do_page_fault() 252 current->comm, regs->pc, entry->fixup); bad_page_fault() 254 current->thread.bad_uaddr = address; bad_page_fault()
|
/linux-4.4.14/arch/unicore32/kernel/ |
H A D | ptrace.c | 121 if (!(current->ptrace & PT_PTRACED)) syscall_trace() 135 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) syscall_trace() 142 if (current->exit_code) { syscall_trace() 143 send_sig(current->exit_code, current, 1); syscall_trace() 144 current->exit_code = 0; syscall_trace()
|
/linux-4.4.14/mm/ |
H A D | vmacache.c | 57 return current->mm == mm && !(current->flags & PF_KTHREAD); vmacache_valid_mm() 63 current->vmacache[VMACACHE_HASH(addr)] = newvma; vmacache_update() 73 curr = current; vmacache_valid() 96 struct vm_area_struct *vma = current->vmacache[i]; vmacache_find() 124 struct vm_area_struct *vma = current->vmacache[i]; vmacache_find_exact()
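The vmacache.c entry above shows the per-task cache being indexed with VMACACHE_HASH(addr) and guarded by a check that current->mm matches and the task is not a kthread. A simplified sketch of the lookup-then-refill pattern it implements (the sequence-number check done by vmacache_valid() is left out, find_vma() is the usual rb-tree fallback, and cached_find_vma() is a made-up name for the illustration):

static struct vm_area_struct *cached_find_vma(struct mm_struct *mm,
                                              unsigned long addr)
{
        int idx = VMACACHE_HASH(addr);
        struct vm_area_struct *vma = current->vmacache[idx];

        /* Cache hit only if the entry belongs to this mm and spans addr. */
        if (vma && vma->vm_mm == mm &&
            vma->vm_start <= addr && addr < vma->vm_end)
                return vma;

        vma = find_vma(mm, addr);               /* slow path: rb-tree walk */
        if (vma)
                current->vmacache[idx] = vma;   /* refill, as vmacache_update() does */
        return vma;
}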
|
H A D | mmu_context.c | 23 struct task_struct *tsk = current; use_mm() 53 struct task_struct *tsk = current; unuse_mm()
|
/linux-4.4.14/fs/xfs/ |
H A D | kmem.c | 59 current->comm, current->pid, kmem_alloc() 83 if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS)) kmem_zalloc_large() 89 if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS)) kmem_zalloc_large() 125 current->comm, current->pid, kmem_zone_alloc()
|
/linux-4.4.14/arch/h8300/include/asm/ |
H A D | processor.h | 15 * Default implementation of macro that returns current 23 #include <asm/current.h> 89 (_regs)->er5 = current->mm->start_data; /* GOT base */ \ 99 (_regs)->er5 = current->mm->start_data; /* GOT base */ \ 114 * Free current thread data structures etc.. 134 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
|
/linux-4.4.14/sound/pci/ |
H A D | ad1889.h | 53 #define AD_DMA_RESCA 0x44 /* RES current address */ 55 #define AD_DMA_RESCC 0x4c /* RES current count */ 58 #define AD_DMA_ADCCA 0x54 /* ADC current address */ 60 #define AD_DMA_ADCCC 0x5c /* ADC current count */ 63 #define AD_DMA_SYNCA 0x64 /* synth current address */ 65 #define AD_DMA_SYNCC 0x6c /* synth current count */ 68 #define AD_DMA_WAVCA 0x74 /* wave current address */ 70 #define AD_DMA_WAVCC 0x7c /* wave current count */ 72 #define AD_DMA_RESIC 0x80 /* RES dma interrupt current byte count */ 75 #define AD_DMA_ADCIC 0x88 /* ADC dma interrupt current byte count */ 78 #define AD_DMA_SYNIC 0x90 /* synth dma interrupt current byte count */ 81 #define AD_DMA_WAVIC 0x98 /* wave dma interrupt current byte count */ 84 #define AD_DMA_ICC 0xffffff /* current byte count mask */
|
/linux-4.4.14/drivers/i2c/busses/ |
H A D | i2c-designware-core.h | 44 * @msg_write_idx: the element index of the current tx message in the msgs 46 * @tx_buf_len: the length of the current tx buffer 47 * @tx_buf: the current tx buffer 48 * @msg_read_idx: the element index of the current rx message in the msgs 50 * @rx_buf_len: the length of the current rx buffer 51 * @rx_buf: the current rx buffer 52 * @msg_err: error status of the current transfer 59 * @rx_outstanding: current master-rx elements in tx fifo
|
/linux-4.4.14/include/trace/events/ |
H A D | syscalls.h | 29 syscall_get_arguments(current, regs, 0, 6, __entry->args); 54 __entry->id = syscall_get_nr(current, regs);
|
/linux-4.4.14/arch/cris/kernel/ |
H A D | stacktrace.c | 57 if (tsk != current) { save_stack_trace_tsk() 72 save_stack_trace_tsk(current, trace); save_stack_trace()
|
/linux-4.4.14/arch/arm/probes/uprobes/ |
H A D | core.c | 138 struct uprobe_task *utask = current->utask; arch_uprobe_pre_xol() 143 utask->autask.saved_trap_no = current->thread.trap_no; arch_uprobe_pre_xol() 144 current->thread.trap_no = UPROBE_TRAP_NR; arch_uprobe_pre_xol() 152 struct uprobe_task *utask = current->utask; arch_uprobe_post_xol() 154 WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR); arch_uprobe_post_xol() 156 current->thread.trap_no = utask->autask.saved_trap_no; arch_uprobe_post_xol() 175 struct uprobe_task *utask = current->utask; arch_uprobe_abort_xol() 177 current->thread.trap_no = utask->autask.saved_trap_no; arch_uprobe_abort_xol()
|
/linux-4.4.14/arch/arm64/include/asm/ |
H A D | stackprotector.h | 34 current->stack_canary = canary; boot_init_stack_canary() 35 __stack_chk_guard = current->stack_canary; boot_init_stack_canary()
|
/linux-4.4.14/arch/avr32/kernel/ |
H A D | stacktrace.c | 32 low = (unsigned long)task_stack_page(current); save_stack_trace() 49 * current frame. save_stack_trace()
|
/linux-4.4.14/arch/blackfin/kernel/ |
H A D | stacktrace.c | 30 low = (unsigned long)task_stack_page(current); save_stack_trace() 47 * current frame. save_stack_trace()
|
/linux-4.4.14/arch/arm/include/asm/ |
H A D | stackprotector.h | 34 current->stack_canary = canary; boot_init_stack_canary() 35 __stack_chk_guard = current->stack_canary; boot_init_stack_canary()
|
H A D | processor.h | 15 * Default implementation of macro that returns current 28 #define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \ 53 #define nommu_start_thread(regs) regs->ARM_r10 = current->mm->start_data 59 if (current->personality & ADDR_LIMIT_32BIT) \
|
/linux-4.4.14/drivers/mtd/tests/ |
H A D | mtd_test.h | 7 if (signal_pending(current)) { mtdtest_relax()
|
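The mtd_test.h hit is the standard "relax" helper for long-running kernel test loops: yield the CPU and abort if the caller has a signal pending. A minimal hedged sketch of the same helper:

    #include <linux/errno.h>
    #include <linux/sched.h>

    static inline int test_relax_sketch(void)
    {
            cond_resched();                 /* give other runnable tasks a turn */
            if (signal_pending(current))    /* e.g. the user hit Ctrl-C */
                    return -EINTR;
            return 0;
    }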
/linux-4.4.14/arch/sparc/power/ |
H A D | hibernate.c | 36 struct mm_struct *mm = current->active_mm; restore_processor_state()
|
/linux-4.4.14/arch/xtensa/kernel/ |
H A D | traps.c | 183 /* If in user mode, send SIGILL signal to current process */ do_unhandled() 187 current->comm, task_pid_nr(current), regs->pc, exccause); do_unhandled() 188 force_sig(SIGILL, current); do_unhandled() 279 /* If in user mode, send SIGILL signal to current process. */ do_illegal_instruction() 282 current->comm, task_pid_nr(current), regs->pc); do_illegal_instruction() 283 force_sig(SIGILL, current); do_illegal_instruction() 303 current->thread.bad_vaddr = regs->excvaddr; do_unaligned_user() 304 current->thread.error_code = -3; do_unaligned_user() 307 regs->excvaddr, current->comm, task_pid_nr(current), regs->pc); do_unaligned_user() 312 force_sig_info(SIGSEGV, &info, current); do_unaligned_user() 336 /* If in user mode, send SIGTRAP signal to current process */ do_debug() 338 force_sig(SIGTRAP, current); do_debug() 420 * This function dumps the current valid window frame and other base registers.
|
/linux-4.4.14/include/uapi/linux/netfilter/ |
H A D | xt_NFQUEUE.h | 34 #define NFQ_FLAG_CPU_FANOUT 0x02 /* use current CPU (no hashing) */
|
/linux-4.4.14/arch/arm/mach-s3c24xx/ |
H A D | simtec-usb.c | 41 /* control power and monitor over-current events on various Simtec 66 pr_debug("usb_simtec: over-current irq (oc detected)\n"); usb_simtec_ocirq() 69 pr_debug("usb_simtec: over-current irq (oc cleared)\n"); usb_simtec_ocirq() 83 "USB Over-current", info); usb_simtec_enableoc()
|
/linux-4.4.14/tools/iio/ |
H A D | iio_utils.c | 34 char *current; iioutils_break_up_name() local 46 current = strdup(full_name + strlen(prefix) + 1); iioutils_break_up_name() 47 if (!current) iioutils_break_up_name() 50 working = strtok(current, "_\0"); iioutils_break_up_name() 52 free(current); iioutils_break_up_name() 69 free(current); iioutils_break_up_name() 317 struct iio_channel_info *current; build_channel_array() local 384 current = &(*ci_array)[count++]; build_channel_array() 423 current->scale = 1.0; build_channel_array() 424 current->offset = 0; build_channel_array() 425 current->name = strndup(ent->d_name, build_channel_array() 428 if (!current->name) { build_channel_array() 436 ret = iioutils_break_up_name(current->name, build_channel_array() 437 &current->generic_name); build_channel_array() 440 free(current->name); build_channel_array() 448 current->name); build_channel_array() 465 if (fscanf(sysfsfp, "%u", &current->index) != 1) { build_channel_array() 482 ret = iioutils_get_param_float(&current->scale, build_channel_array() 485 current->name, build_channel_array() 486 current->generic_name); build_channel_array() 490 ret = iioutils_get_param_float(&current->offset, build_channel_array() 493 current->name, build_channel_array() 494 current->generic_name); build_channel_array() 498 ret = iioutils_get_type(&current->is_signed, build_channel_array() 499 &current->bytes, build_channel_array() 500 &current->bits_used, build_channel_array() 501 &current->shift, build_channel_array() 502 &current->mask, build_channel_array() 503 &current->be, build_channel_array() 505 current->name, build_channel_array() 506 current->generic_name); build_channel_array()
|
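Here the identifier is an ordinary userspace local named current, used while splitting sysfs channel names such as in_voltage0_raw into a generic form. A small, hedged userspace illustration of that idea (generic_name_sketch() is not the exact tools/iio routine, which tokenises on '_' rather than filtering characters):

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Derive a "generic" channel name by dropping per-channel index digits,
     * e.g. "in_voltage0_raw" -> "in_voltage_raw". Caller frees the result. */
    static char *generic_name_sketch(const char *full_name)
    {
            char *out = malloc(strlen(full_name) + 1);
            size_t o = 0;

            if (!out)
                    return NULL;
            for (const char *p = full_name; *p; p++)
                    if (!isdigit((unsigned char)*p))
                            out[o++] = *p;
            out[o] = '\0';
            return out;
    }

    int main(void)
    {
            char *g = generic_name_sketch("in_voltage0_raw");

            printf("%s\n", g);      /* prints "in_voltage_raw" */
            free(g);
            return 0;
    }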
/linux-4.4.14/drivers/media/i2c/ |
H A D | lm3646.c | 157 /* read and check current mode of chip to start flash */ lm3646_set_ctrl() 169 * read and check current mode of chip to stop flash lm3646_set_ctrl() 233 /* max flash current */ lm3646_init_controls() 240 /* max torch current */ lm3646_init_controls() 313 * LED1 flash current setting lm3646_init_device() 314 * LED2 flash current = Total(Max) flash current - LED1 flash current lm3646_init_device() 325 * LED1 torch current setting lm3646_init_device() 326 * LED2 torch current = Total(Max) torch current - LED1 torch current lm3646_init_device()
|
/linux-4.4.14/include/media/ |
H A D | as3645a.h | 53 * @peak: Inductor peak current limit (0=1.25A, 1=1.5A, 2=1.75A, 3=2.0A) 55 * @flash_max_current: Max flash current (mA, <= AS3645A_FLASH_INTENSITY_MAX) 56 * @torch_max_current: Max torch current (mA, >= AS3645A_TORCH_INTENSITY_MAX)
|
/linux-4.4.14/include/net/ |
H A D | cls_cgroup.h | 48 classid = task_cls_classid(current); sock_update_classid() 55 u32 classid = task_cls_state(current)->classid; task_get_classid() 58 * packets originating from softirq context as accessing `current' task_get_classid()
|
/linux-4.4.14/include/drm/ |
H A D | drm_os_linux.h | 23 #define DRM_CURRENTPID task_pid_nr(current) 45 DECLARE_WAITQUEUE(entry, current); \ 58 if (signal_pending(current)) { \
|
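The drm_os_linux.h hits come from the DRM_WAIT_ON-style macros: register current on a wait queue, poll a condition with short interruptible sleeps, and give up on timeout or a pending signal. A hedged sketch of that loop written as a plain function (wait_on_sketch() is illustrative):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    static int wait_on_sketch(wait_queue_head_t *q, int *done,
                              unsigned long timeout)
    {
            DECLARE_WAITQUEUE(entry, current);
            unsigned long end = jiffies + timeout;
            int ret = 0;

            add_wait_queue(q, &entry);
            for (;;) {
                    __set_current_state(TASK_INTERRUPTIBLE);
                    if (*done)
                            break;
                    if (time_after_eq(jiffies, end)) {
                            ret = -EBUSY;
                            break;
                    }
                    /* Sleep roughly 10ms, or one jiffy if HZ is very low. */
                    schedule_timeout((HZ / 100 > 1) ? HZ / 100 : 1);
                    if (signal_pending(current)) {
                            ret = -EINTR;
                            break;
                    }
            }
            __set_current_state(TASK_RUNNING);
            remove_wait_queue(q, &entry);
            return ret;
    }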
/linux-4.4.14/arch/sh/kernel/cpu/ |
H A D | fpu.c | 10 if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current) init_fpu() 42 struct task_struct *tsk = current; __fpu_state_restore() 52 struct task_struct *tsk = current; fpu_state_restore()
|
/linux-4.4.14/arch/sparc/include/uapi/asm/ |
H A D | openpromio.h | 34 #define OPROMSETCUR 0x20004FF0 /* int node - Sets current node */ 35 #define OPROMPCI2NODE 0x20004FF1 /* int pci_bus, pci_devfn - Sets current node to PCI device's node */ 36 #define OPROMPATH2NODE 0x20004FF2 /* char path[] - Set current node from fully qualified PROM path */
|
H A D | psr.h | 6 * PSTATE.PRIV for the current CPU privilege level. 21 #define PSR_CWP 0x0000001f /* current window pointer */ 24 #define PSR_S 0x00000080 /* current privilege level */
|
/linux-4.4.14/arch/mips/mm/ |
H A D | fault.c | 43 struct task_struct *tsk = current; __do_page_fault() 54 current->comm, current->pid, field, address, write, __do_page_fault() 63 current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP) __do_page_fault() 128 current->comm, current->pid, __do_page_fault() 139 current->comm, current->pid, __do_page_fault() 158 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) __do_page_fault() 225 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; __do_page_fault() 237 current->thread.cp0_baduaddr = address; __do_page_fault() 284 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; __do_page_fault()
|
/linux-4.4.14/arch/nios2/kernel/ |
H A D | process.c | 154 pr_emerg("COMM=%s PID=%d\n", current->comm, current->pid); dump() 156 if (current->mm) { dump() 158 (int) current->mm->start_code, dump() 159 (int) current->mm->end_code, dump() 160 (int) current->mm->start_data, dump() 161 (int) current->mm->end_data, dump() 162 (int) current->mm->end_data, dump() 163 (int) current->mm->brk); dump() 165 (int) current->mm->start_stack, dump() 166 (int)(((unsigned long) current) + THREAD_SIZE)); dump() 222 if (!p || p == current || p->state == TASK_RUNNING) get_wchan()
|
/linux-4.4.14/arch/openrisc/include/asm/ |
H A D | tlbflush.h | 26 #include <asm/current.h> 30 * - flush_tlb() flushes the current mm struct TLBs 46 flush_tlb_mm(current->mm); flush_tlb()
|
/linux-4.4.14/arch/arm/lib/ |
H A D | uaccess_with_memcpy.c | 22 #include <asm/current.h> 35 pgd = pgd_offset(current->mm, addr); pin_page_for_write() 58 ptl = &current->mm->page_table_lock; pin_page_for_write() 75 pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl); pin_page_for_write() 103 down_read(&current->mm->mmap_sem); __copy_to_user_memcpy() 111 up_read(&current->mm->mmap_sem); __copy_to_user_memcpy() 115 down_read(&current->mm->mmap_sem); __copy_to_user_memcpy() 135 up_read(&current->mm->mmap_sem); __copy_to_user_memcpy() 171 down_read(&current->mm->mmap_sem); __clear_user_memset() 178 up_read(&current->mm->mmap_sem); __clear_user_memset() 181 down_read(&current->mm->mmap_sem); __clear_user_memset() 199 up_read(&current->mm->mmap_sem); __clear_user_memset()
|
/linux-4.4.14/net/netlabel/ |
H A D | netlabel_user.h | 51 security_task_getsecid(current, &audit_info->secid); netlbl_netlink_auditinfo() 52 audit_info->loginuid = audit_get_loginuid(current); netlbl_netlink_auditinfo() 53 audit_info->sessionid = audit_get_sessionid(current); netlbl_netlink_auditinfo()
|
/linux-4.4.14/sound/usb/6fire/ |
H A D | pcm.h | 46 snd_pcm_uframes_t dma_off; /* current position in alsa dma_area */ 47 snd_pcm_uframes_t period_off; /* current position in current period */
|
/linux-4.4.14/drivers/staging/lustre/lustre/include/linux/ |
H A D | obd.h | 48 #include <linux/sched.h> /* for struct task_struct, for current.h */ 77 lock->task = current; __client_obd_list_lock() 92 current->comm, current->pid, __client_obd_list_lock() 96 LCONSOLE_WARN("====== for current process =====\n"); __client_obd_list_lock()
|
/linux-4.4.14/drivers/scsi/bnx2i/ |
H A D | bnx2i_sysfs.c | 33 * @buf: buffer to return current SQ size parameter 35 * Returns current SQ size parameter, this parameter determines the number 50 * @buf: buffer to return current SQ size parameter 90 * @buf: buffer to return current SQ size parameter 106 * @buf: buffer to return current SQ size parameter
|
/linux-4.4.14/drivers/macintosh/ |
H A D | windfarm_pid.h | 37 int index; /* index of current sample */ 38 s32 target; /* current target value */ 72 int index; /* index of current power */ 73 int tindex; /* index of current temp */ 74 s32 target; /* current target value */
|
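windfarm_pid.h describes PID-controller state: a small ring of recent samples indexed by "index" and a computed "target". A generic, hedged userspace illustration of how such state is typically updated (the gains, history depth, and pid_step() are invented for the example, not the windfarm algorithm):

    #include <stdio.h>

    #define HIST 4

    struct pid_sketch {
            double gp, gi, gd;      /* proportional/integral/derivative gains */
            double samples[HIST];   /* ring of recent error samples */
            int index;              /* index of current sample, as in the header */
            double integral;
            double target;          /* current target value, as in the header */
    };

    static double pid_step(struct pid_sketch *p, double error)
    {
            int prev = p->index;

            p->index = (p->index + 1) % HIST;
            p->samples[p->index] = error;
            p->integral += error;

            /* target = P term + I term + D term (difference of last samples) */
            p->target = p->gp * error
                      + p->gi * p->integral
                      + p->gd * (p->samples[p->index] - p->samples[prev]);
            return p->target;
    }

    int main(void)
    {
            struct pid_sketch p = { .gp = 0.5, .gi = 0.1, .gd = 0.05 };

            for (int i = 0; i < 5; i++)
                    printf("target = %f\n", pid_step(&p, 60.0 - (50.0 + i)));
            return 0;
    }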
/linux-4.4.14/arch/cris/include/uapi/asm/ |
H A D | etraxgpio.h | 33 #define IO_READBITS 0x1 /* read and return current port bits (obsolete) */ 50 returns mask with current inputs (obsolete) */ 52 returns mask with current outputs (obsolete)*/ 78 /* *arg updated with current input pins. */ 80 /* *arg updated with current output pins. */
|
/linux-4.4.14/include/linux/i2c/ |
H A D | adp8870.h | 84 * Backlight current 0..30mA 89 * L2 comparator current 0..1106uA 94 * L3 comparator current 0..551uA 99 * L4 comparator current 0..275uA 104 * L5 comparator current 0..138uA
|
/linux-4.4.14/tools/testing/selftests/powerpc/pmu/ebb/ |
H A D | instruction_count_test.c | 72 uint64_t current, overhead; determine_overhead() local 80 current = event->result.value; determine_overhead() 81 if (current < overhead) { determine_overhead() 82 printf("Replacing overhead %lu with %lu\n", overhead, current); determine_overhead() 83 overhead = current; determine_overhead()
|
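determine_overhead() above keeps the minimum value seen across trials and treats it as the measurement overhead. A hedged userspace sketch of the same minimum-over-trials idea, using clock_gettime() instead of the EBB event counts from the selftest:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    int main(void)
    {
            uint64_t overhead = UINT64_MAX;

            for (int i = 0; i < 10; i++) {
                    uint64_t start = now_ns();
                    /* The measured region is empty: whatever remains is overhead. */
                    uint64_t current = now_ns() - start;

                    if (current < overhead) {
                            printf("Replacing overhead %llu with %llu\n",
                                   (unsigned long long)overhead,
                                   (unsigned long long)current);
                            overhead = current;
                    }
            }
            return 0;
    }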
/linux-4.4.14/arch/cris/arch-v32/kernel/ |
H A D | traps.c | 15 * It's possible to use either the USP register or current->thread.usp. show_registers() 16 * USP might not correspond to the current process for all cases this show_registers() 17 * function is called, and current->thread.usp isn't up to date for the show_registers() 18 * current process. Experience shows that using USP is the way to go. show_registers() 53 current->comm, current->pid, (unsigned long)current); show_registers()
|
/linux-4.4.14/drivers/md/persistent-data/ |
H A D | dm-block-manager.c | 106 if (lock->holders[i] == current) { __check_holder() 130 set_task_state(current, TASK_UNINTERRUPTIBLE); __wait() 138 set_task_state(current, TASK_RUNNING); __wait() 212 __add_holder(lock, current); bl_down_read() 217 get_task_struct(current); bl_down_read() 219 w.task = current; bl_down_read() 225 put_task_struct(current); bl_down_read() 240 __add_holder(lock, current); bl_down_read_nonblock() 254 __del_holder(lock, current); bl_up_read() 275 __add_holder(lock, current); bl_down_write() 280 get_task_struct(current); bl_down_write() 281 w.task = current; bl_down_write() 292 put_task_struct(current); bl_down_write() 300 __del_holder(lock, current); bl_up_write()
|
/linux-4.4.14/arch/ia64/kernel/ |
H A D | traps.c | 64 current->comm, task_pid_nr(current), str, err, ++die_counter); die() 183 force_sig_info(sig, &siginfo, current); ia64_bad_break() 189 * current fph partition in the task_struct of the fpu-owner (if necessary) and then load 190 * the fp-high partition of the current task (if necessary). Note that the kernel has 212 if (ia64_is_local_fpu_owner(current)) { disabled_fph_fault() 221 ia64_set_local_fpu_owner(current); disabled_fph_fault() 222 if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) { disabled_fph_fault() 223 __ia64_load_fpu(current->thread.fph); disabled_fph_fault() 300 if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) { handle_fpu_swa() 328 current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr); handle_fpu_swa() 363 force_sig_info(SIGFPE, &siginfo, current); handle_fpu_swa() 385 force_sig_info(SIGFPE, &siginfo, current); handle_fpu_swa() 423 force_sig_info(SIGILL, &si, current); ia64_illegal_op_fault() 467 current->comm, task_pid_nr(current), ia64_fault() 505 force_sig_info(sig, &siginfo, current); ia64_fault() 521 force_sig_info(SIGILL, &siginfo, current); ia64_fault() 530 if (fsys_mode(current, ®s)) { ia64_fault() 574 force_sig_info(SIGTRAP, &siginfo, current); ia64_fault() 580 if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) { ia64_fault() 588 force_sig_info(SIGFPE, &siginfo, current); ia64_fault() 621 force_sig_info(SIGILL, &siginfo, current); ia64_fault() 632 force_sig(SIGSEGV, current); ia64_fault() 639 force_sig(SIGSEGV, current); ia64_fault() 651 force_sig(SIGILL, current); ia64_fault()
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
H A D | ipath_user_pages.c | 54 /* call with current->mm->mmap_sem held */ __ipath_get_user_pages() 73 ret = get_user_pages(current, current->mm, __ipath_get_user_pages() 81 current->mm->pinned_vm += num_pages; __ipath_get_user_pages() 165 down_write(&current->mm->mmap_sem); ipath_get_user_pages() 169 up_write(&current->mm->mmap_sem); ipath_get_user_pages() 176 down_write(&current->mm->mmap_sem); ipath_release_user_pages() 180 current->mm->pinned_vm -= num_pages; ipath_release_user_pages() 182 up_write(&current->mm->mmap_sem); ipath_release_user_pages() 210 mm = get_task_mm(current); ipath_release_user_pages_on_close()
|
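The ipath hits show user-page pinning: take current->mm->mmap_sem, call the 4.4-era get_user_pages() (which still takes the task and mm explicitly), and account the pages in current->mm->pinned_vm. A hedged sketch with error handling and partial-pin handling trimmed for brevity (pin_user_buffer_sketch() is illustrative):

    #include <linux/mm.h>
    #include <linux/sched.h>

    static int pin_user_buffer_sketch(unsigned long start, unsigned long num_pages,
                                      struct page **pages)
    {
            long ret;

            down_write(&current->mm->mmap_sem);
            ret = get_user_pages(current, current->mm, start, num_pages,
                                 1 /* write */, 0 /* force */, pages, NULL);
            if (ret > 0)
                    current->mm->pinned_vm += ret;  /* real code checks rlimits too */
            up_write(&current->mm->mmap_sem);

            return ret < 0 ? ret : 0;
    }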
/linux-4.4.14/sound/usb/ |
H A D | card.h | 91 unsigned int curpacksize; /* current packet size in bytes (for capture) */ 92 unsigned int curframesize; /* current packet size in frames (for capture) */ 113 int interface; /* current interface */ 115 struct audioformat *cur_audiofmt; /* current audioformat pointer (for hw_params callback) */ 116 snd_pcm_format_t pcm_format; /* current audio format (for hw_params callback) */ 117 unsigned int channels; /* current number of channels (for hw_params callback) */ 119 unsigned int cur_rate; /* current rate (for hw_params callback) */ 120 unsigned int period_bytes; /* current period bytes (for hw_params callback) */ 121 unsigned int period_frames; /* current frames per period */ 122 unsigned int buffer_periods; /* current periods per buffer */
|
/linux-4.4.14/arch/x86/kernel/fpu/ |
H A D | core.c | 76 return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS); interrupted_kernel_fpu_idle() 110 struct fpu *fpu = &current->thread.fpu; __kernel_fpu_begin() 127 struct fpu *fpu = &current->thread.fpu; __kernel_fpu_end() 184 * This only ever gets called for the current task. 188 WARN_ON_FPU(fpu != &current->thread.fpu); fpu__save() 227 * Copy the current task's FPU state to a new task's FPU context. 234 WARN_ON_FPU(src_fpu != &current->thread.fpu); fpu_copy() 244 * Save current FPU registers directly into the child fpu_copy() 249 * context and mark the current task for lazy restore. fpu_copy() 280 * Activate the current task's in-memory FPU context, 285 WARN_ON_FPU(fpu != &current->thread.fpu); fpu__activate_curr() 290 /* Safe to do for the current task: */ fpu__activate_curr() 307 * If fpregs are active (in the current CPU), then fpu__activate_fpstate_read() 316 /* Safe to do for current and for stopped child tasks: */ fpu__activate_fpstate_read() 341 WARN_ON_FPU(fpu == &current->thread.fpu); fpu__activate_fpstate_write() 378 * Drops current FPU state: deactivates the fpregs and 424 WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */ fpu__clear()
|
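__kernel_fpu_begin()/__kernel_fpu_end() above save and restore current's FPU state so kernel code can touch SIMD registers safely. A hedged sketch of how the public wrappers are consumed on x86 (checksum_with_simd_sketch() is an invented consumer, not code from this tree):

    #include <linux/types.h>
    #include <asm/fpu/api.h>

    static void checksum_with_simd_sketch(void *buf, size_t len)
    {
            kernel_fpu_begin();     /* preemption off, current's FPU state saved */
            /* ... SSE/AVX accelerated work on buf/len would go here ... */
            kernel_fpu_end();       /* user FPU state restored for current */
    }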