/linux-4.1.27/kernel/locking/
rwsem-spinlock.c
     28  if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {   rwsem_is_locked()
     30  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);       rwsem_is_locked()
     50  raw_spin_lock_init(&sem->wait_lock);                      __init_rwsem()
    134  raw_spin_lock_irqsave(&sem->wait_lock, flags);            __down_read()
    139  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);       __down_read()
    154  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);       __down_read()
    178  raw_spin_lock_irqsave(&sem->wait_lock, flags);            __down_read_trylock()
    186  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);       __down_read_trylock()
    200  raw_spin_lock_irqsave(&sem->wait_lock, flags);            __down_write_nested()
    219  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);       __down_write_nested()
    221  raw_spin_lock_irqsave(&sem->wait_lock, flags);            __down_write_nested()
    227  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);       __down_write_nested()
    243  raw_spin_lock_irqsave(&sem->wait_lock, flags);            __down_write_trylock()
    251  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);       __down_write_trylock()
    263  raw_spin_lock_irqsave(&sem->wait_lock, flags);            __up_read()
    268  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);       __up_read()
    278  raw_spin_lock_irqsave(&sem->wait_lock, flags);            __up_write()
    284  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);       __up_write()
    295  raw_spin_lock_irqsave(&sem->wait_lock, flags);            __downgrade_write()
    301  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);       __downgrade_write()
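Every slow-path entry point above brackets its update of the semaphore state in raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() on sem->wait_lock. A minimal userspace sketch of that pattern follows; it is not the kernel code: pthread_spinlock_t stands in for raw_spinlock_t, the rwsem_sketch type and function names are made up, and the irq save/restore step has no userspace equivalent, so it is elided.

```c
/* Sketch: spinlock-protected reader count, modeled on the pattern in
 * __down_read_trylock()/__up_read().  Userspace analogue only. */
#include <pthread.h>

struct rwsem_sketch {                 /* hypothetical, not the kernel struct */
	int activity;                 /* >0: readers, -1: writer, 0: free   */
	pthread_spinlock_t wait_lock; /* protects activity and a wait list  */
};

static int down_read_trylock_sketch(struct rwsem_sketch *sem)
{
	int ok = 0;

	pthread_spin_lock(&sem->wait_lock);   /* kernel: raw_spin_lock_irqsave() */
	if (sem->activity >= 0) {             /* no writer holds the sem */
		sem->activity++;
		ok = 1;
	}
	pthread_spin_unlock(&sem->wait_lock); /* kernel: raw_spin_unlock_irqrestore() */
	return ok;
}

static void up_read_sketch(struct rwsem_sketch *sem)
{
	pthread_spin_lock(&sem->wait_lock);
	sem->activity--;                      /* kernel would also wake waiters here */
	pthread_spin_unlock(&sem->wait_lock);
}
```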
rtmutex.c
     39  * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
     90  * 2) Drop lock->wait_lock
     94  __releases(lock->wait_lock)
     99  raw_spin_unlock(&lock->wait_lock);
    104  * unlock(wait_lock);
    105  * lock(wait_lock);
    111  * unlock(wait_lock);
    112  * lock(wait_lock);
    117  * unlock(wait_lock);
    118  * lock(wait_lock);
    120  * unlock(wait_lock);
    121  * lock(wait_lock);
    136  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
    139  __releases(lock->wait_lock)
    142  raw_spin_unlock(&lock->wait_lock);
    298  * (Note: We do this outside of the protection of lock->wait_lock to
    373  * [L] rtmutex->wait_lock held
    393  * [5] if (!try_lock(lock->wait_lock)) {   [P] try to acquire [L]
    408  * unlock(lock->wait_lock);   release [L]
    549  if (!raw_spin_trylock(&lock->wait_lock)) {            rt_mutex_adjust_prio_chain()
    557  * lock->wait_lock.                                    rt_mutex_adjust_prio_chain()
    566  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_adjust_prio_chain()
    585  * [9] check_exit_conditions_3 protected by lock->wait_lock.   rt_mutex_adjust_prio_chain()
    589  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_adjust_prio_chain()
    612  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_adjust_prio_chain()
    637  * [9] check_exit_conditions_3 protected by lock->wait_lock.   rt_mutex_adjust_prio_chain()
    651  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_adjust_prio_chain()
    696  * and lock->wait_lock. The actual decisions are made after we   rt_mutex_adjust_prio_chain()
    713  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_adjust_prio_chain()
    746  * Must be called with lock->wait_lock held.
    762  * and they serialize on @lock->wait_lock.             try_to_take_rt_mutex()
    873  * This must be called with lock->wait_lock held.
    942  * so the owner struct is protected by wait_lock.      task_blocks_on_rt_mutex()
    947  raw_spin_unlock(&lock->wait_lock);                    task_blocks_on_rt_mutex()
    952  raw_spin_lock(&lock->wait_lock);                      task_blocks_on_rt_mutex()
    963  * Called with lock->wait_lock held.
    978  * lock->wait_lock.                                    wakeup_next_waiter()
    996  * long as we hold lock->wait_lock. The waiter task needs to   wakeup_next_waiter()
   1005  * Must be called with lock->wait_lock held and
   1052  raw_spin_unlock(&lock->wait_lock);                    remove_waiter()
   1057  raw_spin_lock(&lock->wait_lock);                      remove_waiter()
   1097  * lock->wait_lock must be held by the caller.
   1125  raw_spin_unlock(&lock->wait_lock);                    __rt_mutex_slowlock()
   1131  raw_spin_lock(&lock->wait_lock);                      __rt_mutex_slowlock()
   1174  raw_spin_lock(&lock->wait_lock);                      rt_mutex_slowlock()
   1178  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_slowlock()
   1210  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_slowlock()
   1230  * This can be done without taking the @lock->wait_lock as   rt_mutex_slowtrylock()
   1240  raw_spin_lock(&lock->wait_lock);                      rt_mutex_slowtrylock()
   1250  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_slowtrylock()
   1261  raw_spin_lock(&lock->wait_lock);                      rt_mutex_slowunlock()
   1278  * raw_spin_unlock(foo->lock->wait_lock);              rt_mutex_slowunlock()
   1283  * lock->wait_lock. So we do the following sequence:   rt_mutex_slowunlock()
   1287  * raw_spin_unlock(&lock->wait_lock);                  rt_mutex_slowunlock()
   1293  * lock->owner is serialized by lock->wait_lock:       rt_mutex_slowunlock()
   1296  * raw_spin_unlock(&lock->wait_lock);                  rt_mutex_slowunlock()
   1299  /* Drops lock->wait_lock ! */                         rt_mutex_slowunlock()
   1303  raw_spin_lock(&lock->wait_lock);                      rt_mutex_slowunlock()
   1312  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_slowunlock()
   1495  raw_spin_lock_init(&lock->wait_lock);                 __rt_mutex_init()
   1557  raw_spin_lock(&lock->wait_lock);                      rt_mutex_start_proxy_lock()
   1560  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_start_proxy_lock()
   1581  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_start_proxy_lock()
   1629  raw_spin_lock(&lock->wait_lock);                      rt_mutex_finish_proxy_lock()
   1645  raw_spin_unlock(&lock->wait_lock);                    rt_mutex_finish_proxy_lock()
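The densest cluster of hits sits in rt_mutex_slowunlock() (source lines 1261-1312), whose quoted comments describe the unlock race: the owner field may only be cleared by cmpxchg after wait_lock is dropped, and if a waiter queued itself in that window the cmpxchg fails, so the path retakes wait_lock and retries. A compressed sketch of that retry loop, under stated assumptions: C11 atomics, a pthread mutex standing in for the raw wait_lock, and illustrative names throughout.

```c
/* Sketch of the rt_mutex_slowunlock() retry loop: drop wait_lock before
 * clearing the owner with cmpxchg, and go around again if a waiter
 * enqueued itself in the window.  Illustrative, not kernel code. */
#include <pthread.h>
#include <stdatomic.h>

struct rtmutex_sketch {
	_Atomic(void *) owner;      /* owning task, or NULL when free */
	int nr_waiters;             /* protected by wait_lock         */
	pthread_mutex_t wait_lock;  /* stand-in for raw_spinlock_t    */
};

static void slowunlock_sketch(struct rtmutex_sketch *lock, void *me)
{
	pthread_mutex_lock(&lock->wait_lock);

	while (lock->nr_waiters == 0) {
		/* No waiter visible: drop wait_lock, then try the cmpxchg.
		 * In the kernel, a blocking waiter tags lock->owner
		 * (RT_MUTEX_HAS_WAITERS) under wait_lock, which is what
		 * makes this cmpxchg fail if anyone queued up meanwhile. */
		pthread_mutex_unlock(&lock->wait_lock);

		void *expected = me;
		if (atomic_compare_exchange_strong(&lock->owner, &expected, NULL))
			return;                       /* released cleanly */

		pthread_mutex_lock(&lock->wait_lock); /* waiter appeared  */
	}

	/* A waiter exists: the kernel wakes the top waiter here while
	 * still holding wait_lock; omitted in this sketch. */
	pthread_mutex_unlock(&lock->wait_lock);
}
```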
mutex-debug.h
     14  * This must be called with lock->wait_lock held.
     42  struct mutex *l = container_of(lock, struct mutex, wait_lock); \
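The hit at line 42 recovers the enclosing struct mutex from a pointer to its wait_lock member. For readers unfamiliar with container_of(), here is a self-contained, runnable illustration of the same pointer arithmetic (the toy mutex_sketch type is invented, not the kernel's):

```c
/* Self-contained illustration of the container_of() trick used at
 * mutex-debug.h:42 to get from &mutex->wait_lock back to the mutex. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mutex_sketch {        /* toy stand-in for struct mutex */
	int count;
	int wait_lock;       /* any member works; the type is irrelevant here */
};

int main(void)
{
	struct mutex_sketch m;
	int *lockp = &m.wait_lock;

	/* Recover the enclosing object from a pointer to its member. */
	struct mutex_sketch *back =
		container_of(lockp, struct mutex_sketch, wait_lock);

	printf("round-trip ok: %d\n", back == &m);
	return 0;
}
```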
rwsem-xadd.c
     84  raw_spin_lock_init(&sem->wait_lock);                  __init_rwsem()
    225  raw_spin_lock_irq(&sem->wait_lock);                   rwsem_down_read_failed()
    243  raw_spin_unlock_irq(&sem->wait_lock);                 rwsem_down_read_failed()
    371  /* sem->wait_lock should not be held when doing optimistic spinning */   rwsem_optimistic_spin()
    383  /* wait_lock will be acquired if write_lock is obtained */               rwsem_optimistic_spin()
    443  raw_spin_lock_irq(&sem->wait_lock);                   rwsem_down_write_failed()
    471  raw_spin_unlock_irq(&sem->wait_lock);                 rwsem_down_write_failed()
    479  raw_spin_lock_irq(&sem->wait_lock);                   rwsem_down_write_failed()
    484  raw_spin_unlock_irq(&sem->wait_lock);                 rwsem_down_write_failed()
    499  raw_spin_lock_irqsave(&sem->wait_lock, flags);        rwsem_wake()
    505  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);   rwsem_wake()
    521  raw_spin_lock_irqsave(&sem->wait_lock, flags);        rwsem_downgrade_wake()
    527  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);   rwsem_downgrade_wake()
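The two comments at lines 371 and 383 state the rule for optimistic spinning: spin on the lock word without holding wait_lock, and take wait_lock only when falling back to the queueing slow path. A sketch of that division of labor, assuming C11 atomics and a simplified count (0 = free, -1 = write-locked) rather than the kernel's xadd protocol:

```c
/* Sketch of the rule in rwsem_optimistic_spin(): spin without wait_lock,
 * take wait_lock only to queue.  Illustrative names and logic. */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

struct rwsem_xadd_sketch {
	atomic_long count;           /* 0 = free (simplified)         */
	pthread_mutex_t wait_lock;   /* protects the waiter list only */
};

static bool write_lock_sketch(struct rwsem_xadd_sketch *sem, int max_spins)
{
	/* Optimistic spinning: wait_lock is NOT held here, so waking and
	 * queueing threads can still get at the waiter list. */
	for (int i = 0; i < max_spins; i++) {
		long expected = 0;
		if (atomic_compare_exchange_weak(&sem->count, &expected, -1))
			return true;    /* got it without touching wait_lock */
		sched_yield();
	}

	/* Slow path: only now take wait_lock to enqueue ourselves
	 * (queueing and sleeping omitted in this sketch). */
	pthread_mutex_lock(&sem->wait_lock);
	/* ... add to wait list, recheck count, sleep ... */
	pthread_mutex_unlock(&sem->wait_lock);
	return false;
}
```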
mutex-debug.c
     28  * Must be called with lock->wait_lock held.
     39  SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));   debug_mutex_wake_waiter()
     54  SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));   debug_mutex_add_waiter()
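Both debug helpers assert that the caller already holds wait_lock before touching the waiter list. Userspace has no spin_is_locked(), so the sketch below tracks the holder explicitly; the dbg_lock_sketch type and helpers are invented for illustration:

```c
/* Sketch of the "must hold wait_lock" assertions in mutex-debug.c:
 * track the holder explicitly so the check is cheap and exact. */
#include <assert.h>
#include <pthread.h>

struct dbg_lock_sketch {
	pthread_mutex_t wait_lock;
	pthread_t holder;            /* valid only while held */
	int held;
};

static void dbg_lock(struct dbg_lock_sketch *l)
{
	pthread_mutex_lock(&l->wait_lock);
	l->holder = pthread_self();
	l->held = 1;
}

static void dbg_unlock(struct dbg_lock_sketch *l)
{
	l->held = 0;
	pthread_mutex_unlock(&l->wait_lock);
}

/* Analogue of SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)) */
static void assert_wait_lock_held(struct dbg_lock_sketch *l)
{
	assert(l->held && pthread_equal(l->holder, pthread_self()));
}
```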
mutex.c
     39  * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
     53  spin_lock_init(&lock->wait_lock);                     __mutex_init()
    171  * and keep spinning, or it will acquire wait_lock, add itself   ww_mutex_set_context_fastpath()
    186  spin_lock_mutex(&lock->base.wait_lock, flags);        ww_mutex_set_context_fastpath()
    191  spin_unlock_mutex(&lock->base.wait_lock, flags);      ww_mutex_set_context_fastpath()
    198  * Callers must hold the mutex wait_lock.
    295  * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
    331  * by acquiring wait_lock there is a guarantee that    mutex_optimistic_spin()
    529  spin_lock_mutex(&lock->wait_lock, flags);             __mutex_lock_common()
    580  spin_unlock_mutex(&lock->wait_lock, flags);           __mutex_lock_common()
    582  spin_lock_mutex(&lock->wait_lock, flags);             __mutex_lock_common()
    602  spin_unlock_mutex(&lock->wait_lock, flags);           __mutex_lock_common()
    608  spin_unlock_mutex(&lock->wait_lock, flags);           __mutex_lock_common()
    737  spin_lock_mutex(&lock->wait_lock, flags);             __mutex_unlock_common_slowpath()
    752  spin_unlock_mutex(&lock->wait_lock, flags);           __mutex_unlock_common_slowpath()
    871  spin_lock_mutex(&lock->wait_lock, flags);             __mutex_trylock_slowpath()
    883  spin_unlock_mutex(&lock->wait_lock, flags);           __mutex_trylock_slowpath()
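The __mutex_lock_common() hits trace the slowpath shape: take wait_lock (529), repeatedly drop it to sleep and retake it to recheck (580/582), and release it on exit (602/608); the unlock slowpath takes the same lock (737/752) to pick a waiter. A userspace analogue, with a condvar standing in for schedule() plus the wakeup, and invented names:

```c
/* Userspace analogue of the __mutex_lock_common() slowpath loop:
 * wait_lock guards the waiter bookkeeping and is dropped across the
 * sleep, just as the kernel drops it across schedule(). */
#include <pthread.h>
#include <stdbool.h>

struct mutex_slow_sketch {
	bool locked;                 /* owner flag, guarded by wait_lock */
	pthread_mutex_t wait_lock;   /* stand-in for spin_lock_mutex()   */
	pthread_cond_t wakeup;       /* stand-in for schedule()/wake_up  */
};

static void lock_slowpath_sketch(struct mutex_slow_sketch *m)
{
	pthread_mutex_lock(&m->wait_lock);           /* cf. mutex.c:529 */
	while (m->locked) {
		/* Kernel: unlock wait_lock, schedule(), relock (580/582).
		 * pthread_cond_wait does the same drop-sleep-retake. */
		pthread_cond_wait(&m->wakeup, &m->wait_lock);
	}
	m->locked = true;
	pthread_mutex_unlock(&m->wait_lock);         /* cf. mutex.c:602 */
}

static void unlock_sketch(struct mutex_slow_sketch *m)
{
	pthread_mutex_lock(&m->wait_lock);           /* cf. mutex.c:737 */
	m->locked = false;
	pthread_cond_signal(&m->wakeup);             /* wake one waiter */
	pthread_mutex_unlock(&m->wait_lock);         /* cf. mutex.c:752 */
}
```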
/linux-4.1.27/include/linux/
rtmutex.h
     24  * @wait_lock: spinlock to protect the structure
     30  raw_spinlock_t wait_lock;                             member in struct:rt_mutex
     70  { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
mutex.h
     53  spinlock_t wait_lock;                                 member in struct:mutex
    111  , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
rwsem.h
     30  raw_spinlock_t wait_lock;                             member in struct:rw_semaphore
     77  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
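rtmutex.h:70, mutex.h:111 and rwsem.h:77 all initialize the embedded wait_lock through a designated initializer inside a compile-time initializer macro, so lock objects can be defined statically. The same idiom sketched with pthreads (sem_sketch and the macro name are illustrative):

```c
/* The compile-time initializer idiom shared by __RT_MUTEX_INITIALIZER,
 * __MUTEX_INITIALIZER and __RWSEM_INITIALIZER, sketched with pthreads:
 * the embedded wait_lock is filled in by a designated initializer. */
#include <pthread.h>

struct sem_sketch {
	long count;
	pthread_mutex_t wait_lock;
	/* the kernel versions also initialize a wait_list here */
};

#define SEM_SKETCH_INITIALIZER            \
	{ .count     = 0,                 \
	  .wait_lock = PTHREAD_MUTEX_INITIALIZER }

/* Usable for static objects, much like DEFINE_MUTEX()/DECLARE_RWSEM(): */
static struct sem_sketch my_sem = SEM_SKETCH_INITIALIZER;
```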
rwsem-spinlock.h
     25  raw_spinlock_t wait_lock;                             member in struct:rw_semaphore
tty_ldisc.h
    139  raw_spinlock_t wait_lock;                             member in struct:ld_semaphore
rcupdate.h
    940  * does not disable irqs while taking ->wait_lock.
/linux-4.1.27/drivers/tty/
tty_ldsem.c
    114  raw_spin_lock_init(&sem->wait_lock);                  __init_ldsem()
    191  raw_spin_lock_irqsave(&sem->wait_lock, flags);        ldsem_wake()
    193  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);   ldsem_wake()
    207  raw_spin_lock_irq(&sem->wait_lock);                   down_read_failed()
    216  raw_spin_unlock_irq(&sem->wait_lock);                 down_read_failed()
    231  raw_spin_unlock_irq(&sem->wait_lock);                 down_read_failed()
    250  raw_spin_lock_irq(&sem->wait_lock);                   down_read_failed()
    254  raw_spin_unlock_irq(&sem->wait_lock);                 down_read_failed()
    258  raw_spin_unlock_irq(&sem->wait_lock);                 down_read_failed()
    276  raw_spin_lock_irq(&sem->wait_lock);                   down_write_failed()
    285  raw_spin_unlock_irq(&sem->wait_lock);                 down_write_failed()
    298  raw_spin_unlock_irq(&sem->wait_lock);                 down_write_failed()
    300  raw_spin_lock_irq(&sem->wait_lock);                   down_write_failed()
    309  raw_spin_unlock_irq(&sem->wait_lock);                 down_write_failed()
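Unlike the generic rwsem paths, the ldsem down_*_failed() helpers can time out, and the hits above bracket the enqueue, the sleep, and the timeout cleanup with wait_lock. A sketch of a wait_lock-guarded timed wait using pthread_cond_timedwait(); the ldsem_sketch type, the granted counter, and the clock handling are simplifications, not the driver's actual bookkeeping:

```c
/* Sketch of the ldsem down_read_failed() shape: enqueue under wait_lock,
 * sleep with a timeout, and clean up under wait_lock again on expiry. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct ldsem_sketch {
	int granted;                 /* bumped by the wake path under wait_lock */
	pthread_mutex_t wait_lock;
	pthread_cond_t wait;
};

static bool down_read_failed_sketch(struct ldsem_sketch *sem, long timeout_s)
{
	struct timespec deadline;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_s;

	pthread_mutex_lock(&sem->wait_lock);          /* cf. tty_ldsem.c:207 */
	while (!sem->granted && err != ETIMEDOUT)
		err = pthread_cond_timedwait(&sem->wait, &sem->wait_lock,
					     &deadline);
	if (!sem->granted) {
		/* timed out: the driver removes itself from the list here */
		pthread_mutex_unlock(&sem->wait_lock);
		return false;
	}
	sem->granted--;
	pthread_mutex_unlock(&sem->wait_lock);        /* cf. tty_ldsem.c:216 */
	return true;
}
```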
/linux-4.1.27/arch/arm/mach-omap2/
sram242x.S
    292  wait_lock:                      label
    296  bne wait_lock   @ wait if not
sram243x.S
    292  wait_lock:                      label
    296  bne wait_lock   @ wait if not
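In these two OMAP SRAM files wait_lock is not a lock at all: it is a branch label for a loop that polls a status register until a hardware lock bit is set (`bne wait_lock @ wait if not`). A C rendering of that poll loop; the register address and bit mask are placeholders, not real OMAP2 values:

```c
/* C rendering of the "wait_lock: ... bne wait_lock" loop in sram24xx.S:
 * spin until a hardware status bit reports the clock/DLL locked.
 * The address and mask below are placeholders, not OMAP2 registers. */
#include <stdint.h>

#define STATUS_REG_ADDR 0x40000000u   /* placeholder MMIO address     */
#define LOCK_BIT        (1u << 0)     /* placeholder lock-status bit  */

static void wait_for_hw_lock(void)
{
	volatile uint32_t *status = (volatile uint32_t *)STATUS_REG_ADDR;

	/* "bne wait_lock @ wait if not": loop until the bit is set */
	while ((*status & LOCK_BIT) == 0)
		;
}
```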
/linux-4.1.27/kernel/
futex.c
   1142  raw_spin_lock(&pi_state->pi_mutex.wait_lock);         wake_futex_pi()
   1175  raw_spin_unlock(&pi_state->pi_mutex.wait_lock);       wake_futex_pi()
   1190  raw_spin_unlock(&pi_state->pi_mutex.wait_lock);       wake_futex_pi()
   2034  raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);      fixup_owner()
   2038  raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);    fixup_owner()
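futex.c reaches across module boundaries and takes the rt-mutex's own wait_lock, because wake_futex_pi() must see a stable top waiter before handing off ownership. A sketch of that cross-object locking shape with invented pi_state_sketch types; the real code inspects far more state under the lock:

```c
/* Sketch: peek at an rt-mutex's waiter state under its own wait_lock,
 * the way wake_futex_pi() does at futex.c:1142.  Invented types. */
#include <pthread.h>

struct rtmutex_like {
	void *top_waiter;            /* protected by wait_lock */
	pthread_mutex_t wait_lock;
};

struct pi_state_sketch {
	struct rtmutex_like pi_mutex;
};

static void *peek_top_waiter(struct pi_state_sketch *pi_state)
{
	void *waiter;

	/* Take the embedded lock's wait_lock from outside its module. */
	pthread_mutex_lock(&pi_state->pi_mutex.wait_lock);
	waiter = pi_state->pi_mutex.top_waiter;
	pthread_mutex_unlock(&pi_state->pi_mutex.wait_lock);

	return waiter;
}
```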
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
llite_internal.h
    208  * spinlock_t wait_lock;   // align d.d_sa_lock