/linux-4.1.27/kernel/locking/
rtmutex.c
   284  * This can be both boosting and unboosting. task->pi_lock must be held.
   307  raw_spin_lock_irqsave(&task->pi_lock, flags);  rt_mutex_adjust_prio()
   309  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  rt_mutex_adjust_prio()
   359  * we dropped its pi_lock. Is never dereferenced, only used for
   372  * [P] task->pi_lock held
   389  * [1]  lock(task->pi_lock);  [R] acquire [P]
   394  *      unlock(task->pi_lock);  release [P]
   399  * [8]  unlock(task->pi_lock);  release [P]
   404  *      lock(task->pi_lock);  [L] acquire [P]
   407  * [13] unlock(task->pi_lock);  release [P]
   466  raw_spin_lock_irqsave(&task->pi_lock, flags);  rt_mutex_adjust_prio_chain()
   474  * [3] check_exit_conditions_1() protected by task->pi_lock.  rt_mutex_adjust_prio_chain()
   545  * [5] We need to trylock here as we are holding task->pi_lock,  rt_mutex_adjust_prio_chain()
   550  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  rt_mutex_adjust_prio_chain()
   556  * [6] check_exit_conditions_2() protected by task->pi_lock and  rt_mutex_adjust_prio_chain()
   581  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  rt_mutex_adjust_prio_chain()
   596  raw_spin_lock_irqsave(&task->pi_lock, flags);  rt_mutex_adjust_prio_chain()
   611  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  rt_mutex_adjust_prio_chain()
   633  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  rt_mutex_adjust_prio_chain()
   658  raw_spin_lock_irqsave(&task->pi_lock, flags);  rt_mutex_adjust_prio_chain()
   695  * [12] check_exit_conditions_4() protected by task->pi_lock  rt_mutex_adjust_prio_chain()
   702  * task->pi_lock next_lock cannot be dereferenced anymore.  rt_mutex_adjust_prio_chain()
   712  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  rt_mutex_adjust_prio_chain()
   736  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  rt_mutex_adjust_prio_chain()
   828  * pi_lock dance. @task->pi_blocked_on is NULL  try_to_take_rt_mutex()
   838  * @task->pi_lock. Redundant operation for the @waiter == NULL  try_to_take_rt_mutex()
   842  raw_spin_lock_irqsave(&task->pi_lock, flags);  try_to_take_rt_mutex()
   851  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  try_to_take_rt_mutex()
   898  raw_spin_lock_irqsave(&task->pi_lock, flags);  task_blocks_on_rt_mutex()
   911  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  task_blocks_on_rt_mutex()
   916  raw_spin_lock_irqsave(&owner->pi_lock, flags);  task_blocks_on_rt_mutex()
   931  raw_spin_unlock_irqrestore(&owner->pi_lock, flags);  task_blocks_on_rt_mutex()
   970  raw_spin_lock_irqsave(&current->pi_lock, flags);  wakeup_next_waiter()
   992  raw_spin_unlock_irqrestore(&current->pi_lock, flags);  wakeup_next_waiter()
  1016  raw_spin_lock_irqsave(&current->pi_lock, flags);  remove_waiter()
  1019  raw_spin_unlock_irqrestore(&current->pi_lock, flags);  remove_waiter()
  1028  raw_spin_lock_irqsave(&owner->pi_lock, flags);  remove_waiter()
  1040  raw_spin_unlock_irqrestore(&owner->pi_lock, flags);  remove_waiter()
  1071  raw_spin_lock_irqsave(&task->pi_lock, flags);  rt_mutex_adjust_pi()
  1076  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  rt_mutex_adjust_pi()
  1080  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  rt_mutex_adjust_pi()
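The rt_mutex_adjust_prio_chain() hits above encode a strict ordering: task->pi_lock ([P]) is taken first with interrupts disabled, and the rtmutex's wait_lock ([L]) may then only be trylocked (step [5]), backing off and retrying on contention. A minimal sketch of that dance, using the raw spinlock calls from the listing; the function name is hypothetical and the requeue body is elided, so treat this as an illustration of the lock order, not the full chain walk:

    #include <linux/rtmutex.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    /*
     * Sketch of the step [1]/[5] lock dance from the chain walk above.
     * Holding task->pi_lock with irqs off means wait_lock can only be
     * trylocked; on contention both locks are dropped and we retry.
     */
    static void chain_walk_step_sketch(struct task_struct *task,
                                       struct rt_mutex *lock)
    {
            unsigned long flags;

    retry:
            /* [1] pin @task's priority and pi_blocked_on state */
            raw_spin_lock_irqsave(&task->pi_lock, flags);

            /* [5] trylock only: a blocking acquire here could deadlock */
            if (!raw_spin_trylock(&lock->wait_lock)) {
                    raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                    cpu_relax();
                    goto retry;
            }

            /* ... steps [6]..[8]: requeue the waiter, propagate priority ... */

            raw_spin_unlock(&lock->wait_lock);                  /* release [L] */
            raw_spin_unlock_irqrestore(&task->pi_lock, flags);  /* release [P] */
    }
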
/linux-4.1.27/kernel/
task_work.c
    64  raw_spin_lock_irqsave(&task->pi_lock, flags);  task_work_cancel()
    72  raw_spin_unlock_irqrestore(&task->pi_lock, flags);  task_work_cancel()
   108  raw_spin_unlock_wait(&task->pi_lock);  task_work_run()
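task_work.c reuses ->pi_lock to make cancellation atomic against task_work_run(): the work list itself is manipulated with cmpxchg, but the unlink in task_work_cancel() happens under the lock, and task_work_run() spins in raw_spin_unlock_wait() until any cancelling holder is gone. A sketch of the cancel side with the cmpxchg list walk elided; the function name is hypothetical:

    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/task_work.h>

    /*
     * Sketch of task_work_cancel() locking: ->pi_lock (irqs off) brackets
     * the unlink so it cannot race with task_work_run() claiming the list.
     */
    static struct callback_head *
    task_work_cancel_sketch(struct task_struct *task, task_work_func_t func)
    {
            struct callback_head *work = NULL;
            unsigned long flags;

            raw_spin_lock_irqsave(&task->pi_lock, flags);
            /* ... walk task->task_works, unlink the entry whose ->func == func ... */
            raw_spin_unlock_irqrestore(&task->pi_lock, flags);

            return work;
    }
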
futex.c
   666  raw_spin_lock_irq(&pi_state->owner->pi_lock);  free_pi_state()
   668  raw_spin_unlock_irq(&pi_state->owner->pi_lock);  free_pi_state()
   724  raw_spin_lock_irq(&curr->pi_lock);  exit_pi_state_list()
   731  raw_spin_unlock_irq(&curr->pi_lock);  exit_pi_state_list()
   735  raw_spin_lock_irq(&curr->pi_lock);  exit_pi_state_list()
   749  raw_spin_unlock_irq(&curr->pi_lock);  exit_pi_state_list()
   755  raw_spin_lock_irq(&curr->pi_lock);  exit_pi_state_list()
   757  raw_spin_unlock_irq(&curr->pi_lock);  exit_pi_state_list()
   912  * p->pi_lock:  attach_to_pi_owner()
   914  raw_spin_lock_irq(&p->pi_lock);  attach_to_pi_owner()
   923  raw_spin_unlock_irq(&p->pi_lock);  attach_to_pi_owner()
   945  raw_spin_unlock_irq(&p->pi_lock);  attach_to_pi_owner()
  1179  raw_spin_lock_irq(&pi_state->owner->pi_lock);  wake_futex_pi()
  1182  raw_spin_unlock_irq(&pi_state->owner->pi_lock);  wake_futex_pi()
  1184  raw_spin_lock_irq(&new_owner->pi_lock);  wake_futex_pi()
  1188  raw_spin_unlock_irq(&new_owner->pi_lock);  wake_futex_pi()
  1939  raw_spin_lock_irq(&pi_state->owner->pi_lock);  fixup_pi_state_owner()
  1942  raw_spin_unlock_irq(&pi_state->owner->pi_lock);  fixup_pi_state_owner()
  1947  raw_spin_lock_irq(&newowner->pi_lock);  fixup_pi_state_owner()
  1950  raw_spin_unlock_irq(&newowner->pi_lock);  fixup_pi_state_owner()
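The paired lock/unlock hits in wake_futex_pi() and fixup_pi_state_owner() show the ownership hand-off: each task's pi_state_list is protected by that task's own ->pi_lock, so a pi_state is unlinked under the old owner's lock and relinked under the new owner's. A sketch of that hand-off; struct futex_pi_state is private to futex.c, so the cut-down stand-in below only assumes the ->list and ->owner members used here, and the function name is hypothetical:

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    /* cut-down stand-in for the futex_pi_state defined privately in futex.c */
    struct futex_pi_state {
            struct list_head list;
            struct task_struct *owner;
    };

    /*
     * Sketch of the wake_futex_pi() hand-off: unlink under the old
     * owner's ->pi_lock, relink under the new owner's.
     */
    static void pi_state_switch_owner_sketch(struct futex_pi_state *pi_state,
                                             struct task_struct *new_owner)
    {
            raw_spin_lock_irq(&pi_state->owner->pi_lock);
            list_del_init(&pi_state->list);
            raw_spin_unlock_irq(&pi_state->owner->pi_lock);

            raw_spin_lock_irq(&new_owner->pi_lock);
            list_add(&pi_state->list, &new_owner->pi_state_list);
            pi_state->owner = new_owner;
            raw_spin_unlock_irq(&new_owner->pi_lock);
    }
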
exit.c
   707  raw_spin_unlock_wait(&tsk->pi_lock);  do_exit()
   815  * To avoid it, we have to wait for releasing tsk->pi_lock which  do_exit()
   819  raw_spin_unlock_wait(&tsk->pi_lock);  do_exit()
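The do_exit() hits use raw_spin_unlock_wait(), which never takes the lock: it only spins until any current holder (for example a concurrent try_to_wake_up() already inside tsk->pi_lock) drops it, so the exiting task is not torn down under a waker's feet. A sketch of the idiom; the function name is hypothetical and the barrier placement follows the usual unlock_wait pattern rather than being copied from exit.c:

    #include <linux/sched.h>
    #include <linux/spinlock.h>

    /*
     * Sketch of the do_exit() synchronization: settle any in-flight
     * waker holding tsk->pi_lock before continuing to tear down state.
     */
    static void exit_settle_wakers_sketch(struct task_struct *tsk)
    {
            smp_mb();       /* order prior state changes against the spin */
            raw_spin_unlock_wait(&tsk->pi_lock);
            /* ... safe to continue exiting ... */
    }
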
fork.c
  1204  raw_spin_lock_init(&p->pi_lock);  rt_mutex_init_task()
/linux-4.1.27/include/linux/
init_task.h
   239  .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),  \
sched.h
  1513  raw_spinlock_t pi_lock;  member in struct task_struct
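Together with the fork.c hit above, these three sites are the whole lifecycle of the lock: declared as a raw spinlock member of task_struct, statically initialized for the boot init task, and runtime-initialized for every forked child in rt_mutex_init_task(). Collected into one sketch around a hypothetical stand-in struct:

    #include <linux/spinlock.h>

    /* stand-in for the relevant slice of task_struct (sched.h:1513) */
    struct demo_task {
            raw_spinlock_t pi_lock;
    };

    /* static form, as in init_task.h:239 */
    static struct demo_task demo_init_task = {
            .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(demo_init_task.pi_lock),
    };

    /* runtime form, as in fork.c:1204 (rt_mutex_init_task) */
    static void demo_fork_init(struct demo_task *p)
    {
            raw_spin_lock_init(&p->pi_lock);
    }
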
/linux-4.1.27/kernel/sched/
sched.h
   904  * holding both task_struct::pi_lock and rq::lock.  sched_ttwu_pending()
  1208  * rq->lock. They are however serialized by p->pi_lock.
  1420  lockdep_assert_held(&p->pi_lock);  sched_avg_update()
  1435  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
  1438  __acquires(p->pi_lock)
  1444  raw_spin_lock_irqsave(&p->pi_lock, *flags);
  1466  raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
  1482  __releases(p->pi_lock)
  1485  raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
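The __acquires/__releases hits above belong to task_rq_lock(), whose core trick is a retry loop: p->pi_lock stabilizes which rq the task is on, but the task can still migrate between reading task_rq(p) and taking the rq lock, so the result is re-checked. A sketch of the loop; struct rq, task_rq() and likely() are the scheduler's own, and the function name is hypothetical:

    #include <linux/sched.h>
    #include <linux/spinlock.h>

    /*
     * Sketch of the task_rq_lock() retry loop: take ->pi_lock first,
     * then the rq lock, and start over if the task moved in between.
     * On success both locks are held, matching __acquires() above.
     */
    static struct rq *task_rq_lock_sketch(struct task_struct *p,
                                          unsigned long *flags)
    {
            struct rq *rq;

            for (;;) {
                    raw_spin_lock_irqsave(&p->pi_lock, *flags);
                    rq = task_rq(p);
                    raw_spin_lock(&rq->lock);
                    if (likely(rq == task_rq(p)))
                            return rq;      /* stable: caller unlocks both */
                    raw_spin_unlock(&rq->lock);
                    raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
            }
    }
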
core.c
  1032  * The caller should hold either p->pi_lock or rq->lock, when changing  set_task_cpu()
  1033  * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.  set_task_cpu()
  1041  WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||  set_task_cpu()
  1094  double_raw_lock(&arg->src_task->pi_lock,  migrate_swap_stop()
  1095  &arg->dst_task->pi_lock);  migrate_swap_stop()
  1116  raw_spin_unlock(&arg->dst_task->pi_lock);  migrate_swap_stop()
  1117  raw_spin_unlock(&arg->src_task->pi_lock);  migrate_swap_stop()
  1303  * ->cpus_allowed is protected by both rq->lock and p->pi_lock
  1376  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  1668  raw_spin_lock_irqsave(&p->pi_lock, flags);  try_to_wake_up()
  1707  raw_spin_unlock_irqrestore(&p->pi_lock, flags);  try_to_wake_up()
  1730  if (!raw_spin_trylock(&p->pi_lock)) {  try_to_wake_up_local()
  1732  raw_spin_lock(&p->pi_lock);  try_to_wake_up_local()
  1745  raw_spin_unlock(&p->pi_lock);  try_to_wake_up_local()
  1950  raw_spin_lock_irqsave(&p->pi_lock, flags);  sched_fork()
  1952  raw_spin_unlock_irqrestore(&p->pi_lock, flags);  sched_fork()
  2082  raw_spin_lock_irqsave(&p->pi_lock, flags);  wake_up_new_task()
  2424  raw_spin_lock_irqsave(&p->pi_lock, flags);  sched_exec()
  2432  raw_spin_unlock_irqrestore(&p->pi_lock, flags);  sched_exec()
  2437  raw_spin_unlock_irqrestore(&p->pi_lock, flags);  sched_exec()
  4158  raw_spin_lock_irqsave(&p->pi_lock, flags);  sched_getaffinity()
  4160  raw_spin_unlock_irqrestore(&p->pi_lock, flags);  sched_getaffinity()
  4849  raw_spin_lock(&p->pi_lock);  __migrate_task()
  4869  raw_spin_unlock(&p->pi_lock);  __migrate_task()
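migrate_swap_stop() (lines 1094-1095 above) must hold both tasks' pi_locks at once; the double_raw_lock() helper it calls avoids an AB-BA deadlock by always acquiring in a fixed address order. A sketch of that helper:

    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    /*
     * Sketch of double_raw_lock(): sort the two locks by address so any
     * two CPUs locking the same pair agree on the order, then take the
     * second with a nested annotation to keep lockdep happy.
     */
    static void double_raw_lock_sketch(raw_spinlock_t *l1, raw_spinlock_t *l2)
    {
            if (l1 > l2)
                    swap(l1, l2);   /* canonical order: lower address first */

            raw_spin_lock(l1);
            raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
    }
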
deadline.c
  1680  /* Nobody will change task's class if pi_lock is held */  cancel_dl_timer()
  1681  lockdep_assert_held(&p->pi_lock);  cancel_dl_timer()
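cancel_dl_timer() relies on an invariant rather than a lock of its own: a task cannot change scheduling class while its pi_lock is held, so a lockdep assertion is enough. The pattern in miniature, with a hypothetical function name:

    #include <linux/lockdep.h>
    #include <linux/sched.h>

    /*
     * Sketch of the cancel_dl_timer() check: document the caller's
     * locking contract and let lockdep enforce it in debug builds.
     */
    static void assert_class_stable_sketch(struct task_struct *p)
    {
            /* nobody can change p's sched class while this is held */
            lockdep_assert_held(&p->pi_lock);
    }
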
fair.c
  4910  * previous cpu. However, the caller only guarantees p->pi_lock is held; no