Searched refs:wait_lock (Results 1 - 19 of 19) sorted by relevance

/linux-4.4.14/include/asm-generic/
qrwlock_types.h
13 arch_spinlock_t wait_lock; member in struct:qrwlock
18 .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
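For context, the struct these two hits come from pairs wait_lock with the reader/writer count word. A minimal sketch of the 4.4 layout, reconstructed around the excerpt above:

    /* Sketch of struct qrwlock (include/asm-generic/qrwlock_types.h):
     * wait_lock serializes slowpath lockers queueing on the lock. */
    typedef struct qrwlock {
            atomic_t        cnts;       /* reader count + writer flag */
            arch_spinlock_t wait_lock;  /* queues contended lockers */
    } arch_rwlock_t;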
/linux-4.4.14/kernel/locking/
rwsem-spinlock.c
28 if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { rwsem_is_locked()
30 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); rwsem_is_locked()
50 raw_spin_lock_init(&sem->wait_lock); __init_rwsem()
134 raw_spin_lock_irqsave(&sem->wait_lock, flags); __down_read()
139 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); __down_read()
154 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); __down_read()
178 raw_spin_lock_irqsave(&sem->wait_lock, flags); __down_read_trylock()
186 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); __down_read_trylock()
200 raw_spin_lock_irqsave(&sem->wait_lock, flags); __down_write_nested()
219 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); __down_write_nested()
221 raw_spin_lock_irqsave(&sem->wait_lock, flags); __down_write_nested()
227 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); __down_write_nested()
243 raw_spin_lock_irqsave(&sem->wait_lock, flags); __down_write_trylock()
251 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); __down_write_trylock()
263 raw_spin_lock_irqsave(&sem->wait_lock, flags); __up_read()
268 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); __up_read()
278 raw_spin_lock_irqsave(&sem->wait_lock, flags); __up_write()
284 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); __up_write()
295 raw_spin_lock_irqsave(&sem->wait_lock, flags); __downgrade_write()
301 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); __downgrade_write()
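Every hit in this file follows one discipline: sem->count and sem->wait_list are only touched with wait_lock held and interrupts saved. A schematic sketch of the shape shared by the __up_read()/__up_write() hits above (the wake helper name is illustrative, not the file's exact symbol):

    static void up_read_sketch(struct rw_semaphore *sem)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&sem->wait_lock, flags);   /* lines 263/268 */
            if (--sem->count == 0 && !list_empty(&sem->wait_list))
                    wake_one_waiter(sem);    /* hypothetical helper */
            raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
    }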
rtmutex.c
39 * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
82 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
99 * 2) Drop lock->wait_lock
103 __releases(lock->wait_lock)
108 raw_spin_unlock(&lock->wait_lock);
113 * unlock(wait_lock);
114 * lock(wait_lock);
120 * unlock(wait_lock);
121 * lock(wait_lock);
126 * unlock(wait_lock);
127 * lock(wait_lock);
129 * unlock(wait_lock);
130 * lock(wait_lock);
148 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
151 __releases(lock->wait_lock)
154 raw_spin_unlock(&lock->wait_lock);
311 * (Note: We do this outside of the protection of lock->wait_lock to
386 * [L] rtmutex->wait_lock held
406 * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L]
421 * unlock(lock->wait_lock); release [L]
562 if (!raw_spin_trylock(&lock->wait_lock)) { rt_mutex_adjust_prio_chain()
570 * lock->wait_lock. rt_mutex_adjust_prio_chain()
579 raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain()
598 * [9] check_exit_conditions_3 protected by lock->wait_lock. rt_mutex_adjust_prio_chain()
602 raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain()
625 raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain()
650 * [9] check_exit_conditions_3 protected by lock->wait_lock. rt_mutex_adjust_prio_chain()
664 raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain()
709 * and lock->wait_lock. The actual decisions are made after we rt_mutex_adjust_prio_chain()
726 raw_spin_unlock(&lock->wait_lock); rt_mutex_adjust_prio_chain()
759 * Must be called with lock->wait_lock held.
775 * and they serialize on @lock->wait_lock. try_to_take_rt_mutex()
886 * This must be called with lock->wait_lock held.
955 * so the owner struct is protected by wait_lock. task_blocks_on_rt_mutex()
960 raw_spin_unlock(&lock->wait_lock); task_blocks_on_rt_mutex()
965 raw_spin_lock(&lock->wait_lock); task_blocks_on_rt_mutex()
974 * Called with lock->wait_lock held.
990 * lock->wait_lock. mark_wakeup_next_waiter()
1012 * Must be called with lock->wait_lock held and
1059 raw_spin_unlock(&lock->wait_lock); remove_waiter()
1064 raw_spin_lock(&lock->wait_lock); remove_waiter()
1104 * lock->wait_lock must be held by the caller.
1132 raw_spin_unlock(&lock->wait_lock); __rt_mutex_slowlock()
1138 raw_spin_lock(&lock->wait_lock); __rt_mutex_slowlock()
1181 raw_spin_lock(&lock->wait_lock); rt_mutex_slowlock()
1185 raw_spin_unlock(&lock->wait_lock); rt_mutex_slowlock()
1214 raw_spin_unlock(&lock->wait_lock); rt_mutex_slowlock()
1234 * This can be done without taking the @lock->wait_lock as rt_mutex_slowtrylock()
1244 raw_spin_lock(&lock->wait_lock); rt_mutex_slowtrylock()
1254 raw_spin_unlock(&lock->wait_lock); rt_mutex_slowtrylock()
1266 raw_spin_lock(&lock->wait_lock); rt_mutex_slowunlock()
1283 * raw_spin_unlock(foo->lock->wait_lock); rt_mutex_slowunlock()
1288 * lock->wait_lock. So we do the following sequence: rt_mutex_slowunlock()
1292 * raw_spin_unlock(&lock->wait_lock); rt_mutex_slowunlock()
1298 * lock->owner is serialized by lock->wait_lock: rt_mutex_slowunlock()
1301 * raw_spin_unlock(&lock->wait_lock); rt_mutex_slowunlock()
1304 /* Drops lock->wait_lock ! */ rt_mutex_slowunlock()
1308 raw_spin_lock(&lock->wait_lock); rt_mutex_slowunlock()
1315 * Queue the next waiter for wakeup once we release the wait_lock. rt_mutex_slowunlock()
1319 raw_spin_unlock(&lock->wait_lock); rt_mutex_slowunlock()
1537 raw_spin_lock_init(&lock->wait_lock); __rt_mutex_init()
1599 raw_spin_lock(&lock->wait_lock); rt_mutex_start_proxy_lock()
1602 raw_spin_unlock(&lock->wait_lock); rt_mutex_start_proxy_lock()
1623 raw_spin_unlock(&lock->wait_lock); rt_mutex_start_proxy_lock()
1671 raw_spin_lock(&lock->wait_lock); rt_mutex_finish_proxy_lock()
1687 raw_spin_unlock(&lock->wait_lock); rt_mutex_finish_proxy_lock()
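The rt_mutex_slowunlock() hits (source lines 1266-1319) trace a retry protocol: lock->owner may only be cleared while wait_lock is held, and if a waiter queues itself between unlock attempts, wait_lock is retaken and the loop repeats. Condensed from the quoted comments, assuming an unlock_rt_mutex_safe()-style helper (name hedged) that returns true only when it cleared the owner with no waiters present:

    raw_spin_lock(&lock->wait_lock);
    while (!rt_mutex_has_waiters(lock)) {
            /* Drops lock->wait_lock (source line 1304). */
            if (unlock_rt_mutex_safe(lock) == true)
                    return;                      /* nobody to wake */
            raw_spin_lock(&lock->wait_lock);     /* a waiter raced in: retry */
    }
    mark_wakeup_next_waiter(&wake_q, lock);      /* queue wakeup, then... */
    raw_spin_unlock(&lock->wait_lock);           /* ...release (line 1319) */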
mutex-debug.h
14 * This must be called with lock->wait_lock held.
42 struct mutex *l = container_of(lock, struct mutex, wait_lock); \
mutex-debug.c
28 * Must be called with lock->wait_lock held.
39 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); debug_mutex_wake_waiter()
54 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); debug_mutex_add_waiter()
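Both mutex-debug hits are assertions, not lock acquisitions: the debug hooks verify that the caller honoured the "must be called with lock->wait_lock held" contract quoted at source line 28. The pattern:

    /* The hook never takes wait_lock itself; it only checks the caller
     * already holds it (source lines 39 and 54). */
    SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));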
qrwlock.c
89 arch_spin_lock(&lock->wait_lock); queued_read_lock_slowpath()
102 arch_spin_unlock(&lock->wait_lock); queued_read_lock_slowpath()
115 arch_spin_lock(&lock->wait_lock); queued_write_lock_slowpath()
147 arch_spin_unlock(&lock->wait_lock); queued_write_lock_slowpath()
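In both qrwlock slowpaths, wait_lock is what makes the queue fair: contended CPUs line up on the embedded arch spinlock, so only one at a time spins on the lock word itself. A sketch of the read slowpath's shape (source lines 89-102; helper names as in 4.4, hedged):

    void queued_read_lock_slowpath_sketch(struct qrwlock *lock)
    {
            u32 cnts;

            arch_spin_lock(&lock->wait_lock);       /* join the queue */
            cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
            rspin_until_writer_unlock(lock, cnts);  /* wait out any writer */
            arch_spin_unlock(&lock->wait_lock);     /* admit the next CPU */
    }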
rwsem-xadd.c
84 raw_spin_lock_init(&sem->wait_lock); __init_rwsem()
225 raw_spin_lock_irq(&sem->wait_lock); rwsem_down_read_failed()
243 raw_spin_unlock_irq(&sem->wait_lock); rwsem_down_read_failed()
372 /* sem->wait_lock should not be held when doing optimistic spinning */ rwsem_optimistic_spin()
384 /* wait_lock will be acquired if write_lock is obtained */ rwsem_optimistic_spin()
457 raw_spin_lock_irq(&sem->wait_lock); rwsem_down_write_failed()
485 raw_spin_unlock_irq(&sem->wait_lock); rwsem_down_write_failed()
493 raw_spin_lock_irq(&sem->wait_lock); rwsem_down_write_failed()
498 raw_spin_unlock_irq(&sem->wait_lock); rwsem_down_write_failed()
523 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock) rwsem_wake()
536 * state is consulted before reading the wait_lock. rwsem_wake()
539 if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags)) rwsem_wake()
543 raw_spin_lock_irqsave(&sem->wait_lock, flags); rwsem_wake()
550 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); rwsem_wake()
566 raw_spin_lock_irqsave(&sem->wait_lock, flags); rwsem_downgrade_wake()
572 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); rwsem_downgrade_wake()
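The rwsem_wake() hits at source lines 539 and 543 show an optimization rather than plain locking: if an optimistic spinner exists, the waker merely trylocks wait_lock and backs off on failure, because the spinner holding wait_lock will notice the waiters itself. Sketched from the quoted lines (the spinner check is hedged from 4.4):

    /* Sketch of rwsem_wake()'s entry (source lines 523-543). */
    if (rwsem_has_spinner(sem)) {
            smp_rmb();          /* order spinner state vs. wait_lock read */
            if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
                    return sem; /* contended: the spinner handles the wake */
    } else {
            raw_spin_lock_irqsave(&sem->wait_lock, flags);
    }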
mutex.c
39 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
53 spin_lock_init(&lock->wait_lock); __mutex_init()
171 * and keep spinning, or it will acquire wait_lock, add itself ww_mutex_set_context_fastpath()
186 spin_lock_mutex(&lock->base.wait_lock, flags); ww_mutex_set_context_fastpath()
191 spin_unlock_mutex(&lock->base.wait_lock, flags); ww_mutex_set_context_fastpath()
198 * Callers must hold the mutex wait_lock.
295 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
331 * by acquiring wait_lock there is a guarantee that mutex_optimistic_spin()
526 spin_lock_mutex(&lock->wait_lock, flags); __mutex_lock_common()
578 spin_unlock_mutex(&lock->wait_lock, flags); __mutex_lock_common()
580 spin_lock_mutex(&lock->wait_lock, flags); __mutex_lock_common()
600 spin_unlock_mutex(&lock->wait_lock, flags); __mutex_lock_common()
606 spin_unlock_mutex(&lock->wait_lock, flags); __mutex_lock_common()
735 spin_lock_mutex(&lock->wait_lock, flags); __mutex_unlock_common_slowpath()
750 spin_unlock_mutex(&lock->wait_lock, flags); __mutex_unlock_common_slowpath()
869 spin_lock_mutex(&lock->wait_lock, flags); __mutex_trylock_slowpath()
881 spin_unlock_mutex(&lock->wait_lock, flags); __mutex_trylock_slowpath()
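The mutex slowpath hits trace one loop: wait_lock guards the wait list but is dropped across each sleep (source lines 578/580) so the owner can release without contending with sleepers. Shape of __mutex_lock_common(), with the acquisition test abstracted into a hypothetical helper:

    spin_lock_mutex(&lock->wait_lock, flags);           /* line 526 */
    list_add_tail(&waiter.list, &lock->wait_list);
    for (;;) {
            if (mutex_try_acquire(lock))                /* hypothetical name */
                    break;
            spin_unlock_mutex(&lock->wait_lock, flags); /* line 578 */
            schedule_preempt_disabled();                /* sleep unlocked */
            spin_lock_mutex(&lock->wait_lock, flags);   /* line 580 */
    }
    spin_unlock_mutex(&lock->wait_lock, flags);         /* lines 600/606 */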
/linux-4.4.14/include/linux/
rtmutex.h
24 * @wait_lock: spinlock to protect the structure
30 raw_spinlock_t wait_lock; member in struct:rt_mutex
70 { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
mutex.h
53 spinlock_t wait_lock; member in struct:mutex
111 , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
rwsem.h
30 raw_spinlock_t wait_lock; member in struct:rw_semaphore
77 .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
rwsem-spinlock.h
25 raw_spinlock_t wait_lock; member in struct:rw_semaphore
tty_ldisc.h
139 raw_spinlock_t wait_lock; member in struct:ld_semaphore
rcupdate.h
887 * does not disable irqs while taking ->wait_lock.
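The struct-member hits above all share one idiom: embed the spinlock in the structure and seed it unlocked inside the static initializer macro, so statically defined locks need no runtime init call. Illustrative use of the rt_mutex variant quoted earlier:

    /* DEFINE_RT_MUTEX expands through __RT_MUTEX_INITIALIZER, which seeds
     * .wait_lock with __RAW_SPIN_LOCK_UNLOCKED (rtmutex.h line 70). */
    static DEFINE_RT_MUTEX(my_static_lock);    /* variable name illustrative */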
/linux-4.4.14/drivers/tty/
tty_ldsem.c
114 raw_spin_lock_init(&sem->wait_lock); __init_ldsem()
191 raw_spin_lock_irqsave(&sem->wait_lock, flags); ldsem_wake()
193 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); ldsem_wake()
207 raw_spin_lock_irq(&sem->wait_lock); down_read_failed()
216 raw_spin_unlock_irq(&sem->wait_lock); down_read_failed()
231 raw_spin_unlock_irq(&sem->wait_lock); down_read_failed()
250 raw_spin_lock_irq(&sem->wait_lock); down_read_failed()
254 raw_spin_unlock_irq(&sem->wait_lock); down_read_failed()
258 raw_spin_unlock_irq(&sem->wait_lock); down_read_failed()
276 raw_spin_lock_irq(&sem->wait_lock); down_write_failed()
285 raw_spin_unlock_irq(&sem->wait_lock); down_write_failed()
298 raw_spin_unlock_irq(&sem->wait_lock); down_write_failed()
300 raw_spin_lock_irq(&sem->wait_lock); down_write_failed()
310 raw_spin_unlock_irq(&sem->wait_lock); down_write_failed()
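tty_ldsem.c repeats the rwsem discipline for line-discipline semaphores: waiters are queued under wait_lock, which is then released across the timed sleep. Shape of down_read_failed() (source lines 207-258; the wait-list field name is an assumption):

    raw_spin_lock_irq(&sem->wait_lock);
    list_add_tail(&waiter.list, &sem->read_wait);   /* list name assumed */
    raw_spin_unlock_irq(&sem->wait_lock);
    timeout = schedule_timeout(timeout);            /* sleep, lock dropped */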
/linux-4.4.14/arch/arm/mach-omap2/
sram242x.S
292 wait_lock: label
296 bne wait_lock @ wait if not
sram243x.S
292 wait_lock: label
296 bne wait_lock @ wait if not
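These two assembly hits are unrelated to the locking primitives above: wait_lock here is a branch label in OMAP SRAM code that busy-waits for a DPLL to report lock. A C rendering of the loop, with register and mask names invented purely for illustration:

    /* "bne wait_lock @ wait if not" as a C busy-wait (names hypothetical). */
    while (!(readl(prcm_pll_status) & PLL_LOCKED))
            ;    /* spin until the PLL asserts its lock bit */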
/linux-4.4.14/kernel/
futex.c
1226 raw_spin_lock(&pi_state->pi_mutex.wait_lock); wake_futex_pi()
1262 raw_spin_unlock(&pi_state->pi_mutex.wait_lock); wake_futex_pi()
1277 raw_spin_unlock(&pi_state->pi_mutex.wait_lock); wake_futex_pi()
2142 raw_spin_lock(&q->pi_state->pi_mutex.wait_lock); fixup_owner()
2146 raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock); fixup_owner()
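The futex hits show PI code reaching into the embedded rt_mutex's wait_lock directly, so the futex-side ownership change and the rt_mutex waiter state are serialized by the same lock. Shape of wake_futex_pi() (source lines 1226-1277; the error path is illustrative):

    raw_spin_lock(&pi_state->pi_mutex.wait_lock);
    new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
    if (!new_owner) {
            raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
            return -EAGAIN;     /* illustrative failure exit */
    }
    /* ... hand ownership to the top waiter ... */
    raw_spin_unlock(&pi_state->pi_mutex.wait_lock);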
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
llite_internal.h
200 * spinlock_t wait_lock; // align d.d_sa_lock
