/linux-4.1.27/kernel/locking/ |
D | rwsem-spinlock.c |
     28  if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {  in rwsem_is_locked()
     30  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in rwsem_is_locked()
     50  raw_spin_lock_init(&sem->wait_lock);  in __init_rwsem()
    134  raw_spin_lock_irqsave(&sem->wait_lock, flags);  in __down_read()
    139  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in __down_read()
    154  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in __down_read()
    178  raw_spin_lock_irqsave(&sem->wait_lock, flags);  in __down_read_trylock()
    186  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in __down_read_trylock()
    200  raw_spin_lock_irqsave(&sem->wait_lock, flags);  in __down_write_nested()
    219  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in __down_write_nested()
    [all …]
|
D | rtmutex.c |
     94  __releases(lock->wait_lock)  in unlock_rt_mutex_safe()
     99  raw_spin_unlock(&lock->wait_lock);  in unlock_rt_mutex_safe()
    139  __releases(lock->wait_lock)  in unlock_rt_mutex_safe()
    142  raw_spin_unlock(&lock->wait_lock);  in unlock_rt_mutex_safe()
    549  if (!raw_spin_trylock(&lock->wait_lock)) {  in rt_mutex_adjust_prio_chain()
    566  raw_spin_unlock(&lock->wait_lock);  in rt_mutex_adjust_prio_chain()
    589  raw_spin_unlock(&lock->wait_lock);  in rt_mutex_adjust_prio_chain()
    612  raw_spin_unlock(&lock->wait_lock);  in rt_mutex_adjust_prio_chain()
    651  raw_spin_unlock(&lock->wait_lock);  in rt_mutex_adjust_prio_chain()
    713  raw_spin_unlock(&lock->wait_lock);  in rt_mutex_adjust_prio_chain()
    [all …]
|
D | rwsem-xadd.c |
     84  raw_spin_lock_init(&sem->wait_lock);  in __init_rwsem()
    225  raw_spin_lock_irq(&sem->wait_lock);  in rwsem_down_read_failed()
    243  raw_spin_unlock_irq(&sem->wait_lock);  in rwsem_down_read_failed()
    443  raw_spin_lock_irq(&sem->wait_lock);  in rwsem_down_write_failed()
    471  raw_spin_unlock_irq(&sem->wait_lock);  in rwsem_down_write_failed()
    479  raw_spin_lock_irq(&sem->wait_lock);  in rwsem_down_write_failed()
    484  raw_spin_unlock_irq(&sem->wait_lock);  in rwsem_down_write_failed()
    499  raw_spin_lock_irqsave(&sem->wait_lock, flags);  in rwsem_wake()
    505  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in rwsem_wake()
    521  raw_spin_lock_irqsave(&sem->wait_lock, flags);  in rwsem_downgrade_wake()
    [all …]
|
D | mutex.c |
     53  spin_lock_init(&lock->wait_lock);  in __mutex_init()
    186  spin_lock_mutex(&lock->base.wait_lock, flags);  in ww_mutex_set_context_fastpath()
    191  spin_unlock_mutex(&lock->base.wait_lock, flags);  in ww_mutex_set_context_fastpath()
    529  spin_lock_mutex(&lock->wait_lock, flags);  in __mutex_lock_common()
    580  spin_unlock_mutex(&lock->wait_lock, flags);  in __mutex_lock_common()
    582  spin_lock_mutex(&lock->wait_lock, flags);  in __mutex_lock_common()
    602  spin_unlock_mutex(&lock->wait_lock, flags);  in __mutex_lock_common()
    608  spin_unlock_mutex(&lock->wait_lock, flags);  in __mutex_lock_common()
    737  spin_lock_mutex(&lock->wait_lock, flags);  in __mutex_unlock_common_slowpath()
    752  spin_unlock_mutex(&lock->wait_lock, flags);  in __mutex_unlock_common_slowpath()
    [all …]
|
D | mutex-debug.c |
     39  SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));  in debug_mutex_wake_waiter()
     54  SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));  in debug_mutex_add_waiter()
|
D | mutex-debug.h |
     42  struct mutex *l = container_of(lock, struct mutex, wait_lock); \
|
/linux-4.1.27/drivers/tty/ |
D | tty_ldsem.c |
    114  raw_spin_lock_init(&sem->wait_lock);  in __init_ldsem()
    191  raw_spin_lock_irqsave(&sem->wait_lock, flags);  in ldsem_wake()
    193  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);  in ldsem_wake()
    207  raw_spin_lock_irq(&sem->wait_lock);  in down_read_failed()
    216  raw_spin_unlock_irq(&sem->wait_lock);  in down_read_failed()
    231  raw_spin_unlock_irq(&sem->wait_lock);  in down_read_failed()
    250  raw_spin_lock_irq(&sem->wait_lock);  in down_read_failed()
    254  raw_spin_unlock_irq(&sem->wait_lock);  in down_read_failed()
    258  raw_spin_unlock_irq(&sem->wait_lock);  in down_read_failed()
    276  raw_spin_lock_irq(&sem->wait_lock);  in down_write_failed()
    [all …]
|
/linux-4.1.27/include/linux/ |
D | rtmutex.h |
     30  raw_spinlock_t wait_lock;  member
     70  { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
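Line 70 above is part of the static initializer that DEFINE_RT_MUTEX() expands to, so a statically defined rt_mutex already has its wait_lock set up; an rt_mutex embedded in another object would go through rt_mutex_init() instead. A minimal, hypothetical module sketch (not from the tree; names are illustrative, and it assumes CONFIG_RT_MUTEXES=y plus a GPL module, since the rt_mutex entry points are GPL-exported):

/* Hypothetical example module: DEFINE_RT_MUTEX() produces a struct rt_mutex
 * whose .wait_lock is initialized by the line shown above, so no explicit
 * init call is needed for the static case. */
#include <linux/module.h>
#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(example_rtmutex);	/* illustrative name */

static int __init example_rtmutex_init(void)
{
	rt_mutex_lock(&example_rtmutex);
	/* critical section; a higher-priority contender would boost the owner */
	rt_mutex_unlock(&example_rtmutex);
	return 0;
}

static void __exit example_rtmutex_exit(void)
{
}

module_init(example_rtmutex_init);
module_exit(example_rtmutex_exit);
MODULE_LICENSE("GPL");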
|
D | mutex.h |
     53  spinlock_t wait_lock;  member
    111  , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
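Line 111 above sits in the static initializer behind DEFINE_MUTEX(); for a mutex embedded in a dynamically allocated object, mutex_init() ends up in __mutex_init(), which is where the spin_lock_init(&lock->wait_lock) at mutex.c line 53 runs. A hedged sketch of that dynamic case (the struct, the names, and the module are invented for illustration):

/* Hypothetical example: mutex_init() initializes the embedded mutex,
 * including its internal wait_lock and wait_list; callers never touch
 * wait_lock directly. */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_dev {
	struct mutex lock;
	int counter;
};

static struct example_dev *edev;

static int __init example_dev_init(void)
{
	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return -ENOMEM;

	mutex_init(&edev->lock);	/* sets up edev->lock.wait_lock */

	mutex_lock(&edev->lock);
	edev->counter++;
	mutex_unlock(&edev->lock);
	return 0;
}

static void __exit example_dev_exit(void)
{
	kfree(edev);
}

module_init(example_dev_init);
module_exit(example_dev_exit);
MODULE_LICENSE("GPL");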
|
D | rwsem.h |
     30  raw_spinlock_t wait_lock;  member
     77  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
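Line 77 above is the wait_lock part of the initializer used by DECLARE_RWSEM(); init_rwsem() covers the dynamic case via __init_rwsem() (rwsem-xadd.c line 84 above, or rwsem-spinlock.c line 50 for the spinlock-based implementation). A small, hypothetical reader/writer sketch with invented names:

/* Hypothetical example: DECLARE_RWSEM() statically initializes the rwsem,
 * including wait_lock; readers may hold it concurrently, a writer holds it
 * exclusively. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_rwsem);
static int example_state;

static int __init example_rwsem_init(void)
{
	down_write(&example_rwsem);	/* exclusive: writer path */
	example_state = 1;
	up_write(&example_rwsem);

	down_read(&example_rwsem);	/* shared: reader path */
	pr_info("example_state=%d\n", example_state);
	up_read(&example_rwsem);
	return 0;
}

static void __exit example_rwsem_exit(void)
{
}

module_init(example_rwsem_init);
module_exit(example_rwsem_exit);
MODULE_LICENSE("GPL");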
|
D | rwsem-spinlock.h |
     25  raw_spinlock_t wait_lock;  member
|
D | tty_ldisc.h |
    139  raw_spinlock_t wait_lock;  member
|
/linux-4.1.27/Documentation/locking/ |
D | rt-mutex-design.txt |
    210  wait_lock. Since the modification of the waiter list is never done in
    211  interrupt context, the wait_lock can be taken without disabling interrupts.
    457  Next, we look at the mutex that the task is blocked on. The mutex's wait_lock
    459  pi_lock and wait_lock goes in the opposite direction. If we fail to grab the
    462  Now that we have both the pi_lock of the task as well as the wait_lock of
    484  wait_lock, and continue the loop again. On the next iteration of the
    489  since we just grab the mutex's wait_lock. And one could be right.
    501  the protection of the mutex's wait_lock, which was not taken yet.
    505  wait_lock. If we fail that lock, we release the pi_lock of the
    508  In the code to release the lock, the wait_lock of the mutex is held
    [all …]
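The excerpts above describe the ordering trick used by the priority-inheritance chain walk: the task's pi_lock is already held, and the mutex's wait_lock then has to be acquired even though the normal nesting takes wait_lock before pi_lock, so the walk uses a trylock and, if that fails, drops pi_lock and starts the loop again (see rtmutex.c line 549 above). The following user-space sketch illustrates only that retry pattern; the two POSIX spinlocks and the locker/chain_walker roles are stand-ins, not kernel code.

/* Build: cc -O2 -pthread trylock_order_demo.c -o trylock_order_demo
 * (file name and lock roles are illustrative only) */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t wait_lock;	/* canonical order: taken first  */
static pthread_spinlock_t pi_lock;	/* canonical order: nests inside */

/* Path that respects the canonical wait_lock -> pi_lock order. */
static void *locker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_spin_lock(&wait_lock);
		pthread_spin_lock(&pi_lock);
		/* ... would touch waiter list / priority state here ... */
		pthread_spin_unlock(&pi_lock);
		pthread_spin_unlock(&wait_lock);
	}
	return NULL;
}

/* Path that needs the locks in the opposite order: trylock, then back off. */
static void *chain_walker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
retry:
		pthread_spin_lock(&pi_lock);
		if (pthread_spin_trylock(&wait_lock)) {
			/* Blocking here could deadlock against locker();
			 * release pi_lock and restart instead. */
			pthread_spin_unlock(&pi_lock);
			goto retry;
		}
		/* ... both locks held: requeue waiter, adjust priority ... */
		pthread_spin_unlock(&wait_lock);
		pthread_spin_unlock(&pi_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_spin_init(&wait_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&pi_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&a, NULL, locker, NULL);
	pthread_create(&b, NULL, chain_walker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("completed without deadlock");
	return 0;
}

The side that takes the locks in the canonical order never backs off; only the reverse-order walker retries, which is the same asymmetry the excerpted text describes for pi_lock versus wait_lock.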
|
/linux-4.1.27/kernel/ |
D | futex.c |
   1142  raw_spin_lock(&pi_state->pi_mutex.wait_lock);  in wake_futex_pi()
   1175  raw_spin_unlock(&pi_state->pi_mutex.wait_lock);  in wake_futex_pi()
   1190  raw_spin_unlock(&pi_state->pi_mutex.wait_lock);  in wake_futex_pi()
   2034  raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);  in fixup_owner()
   2038  raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);  in fixup_owner()
|
/linux-4.1.27/arch/arm/mach-omap2/ |
D | sram243x.S |
    292  wait_lock:  label
    296  bne wait_lock  @ wait if not
|
D | sram242x.S |
    292  wait_lock:  label
    296  bne wait_lock  @ wait if not
|