Lines matching refs: sem (Linux kernel rw_semaphore slow paths, xadd implementation)
73 void __init_rwsem(struct rw_semaphore *sem, const char *name, in __init_rwsem() argument
80 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); in __init_rwsem()
81 lockdep_init_map(&sem->dep_map, name, key, 0); in __init_rwsem()
83 sem->count = RWSEM_UNLOCKED_VALUE; in __init_rwsem()
84 raw_spin_lock_init(&sem->wait_lock); in __init_rwsem()
85 INIT_LIST_HEAD(&sem->wait_list); in __init_rwsem()
87 sem->owner = NULL; in __init_rwsem()
88 osq_lock_init(&sem->osq); in __init_rwsem()
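
The lines above are the out-of-line initializer. In practice a semaphore is declared statically with DECLARE_RWSEM() or set up at runtime with init_rwsem(), which funnels into __init_rwsem(). A minimal usage sketch; example_sem and both callers are hypothetical, the locking calls are the standard <linux/rwsem.h> API:

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);      /* static init; dynamic init uses init_rwsem() */

static void example_reader(void)
{
        down_read(&example_sem);        /* shared: any number of readers at once */
        /* ... read the protected state ... */
        up_read(&example_sem);
}

static void example_writer(void)
{
        down_write(&example_sem);       /* exclusive: blocks readers and writers */
        /* ... modify the protected state ... */
        up_write(&example_sem);
}
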
122 __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type) in __rwsem_do_wake() argument
129 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); in __rwsem_do_wake()
149 oldcount = rwsem_atomic_update(adjustment, sem) - adjustment; in __rwsem_do_wake()
152 if (rwsem_atomic_update(-adjustment, sem) & in __rwsem_do_wake()
168 if (waiter->list.next == &sem->wait_list) in __rwsem_do_wake()
182 rwsem_atomic_add(adjustment, sem); in __rwsem_do_wake()
184 next = sem->wait_list.next; in __rwsem_do_wake()
203 sem->wait_list.next = next; in __rwsem_do_wake()
204 next->prev = &sem->wait_list; in __rwsem_do_wake()
207 return sem; in __rwsem_do_wake()
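
__rwsem_do_wake() hands the lock to queued waiters by adjusting sem->count, which packs an active-locker count (low bits) and a waiting indication (sign) into one word. Below is a runnable user-space model of the grant-and-rollback at lines 149-152; the bias values match the kernel's 64-bit rwsem constants, but the model itself is illustrative, not kernel code (it also assumes a 64-bit long):

#include <stdatomic.h>
#include <stdio.h>

#define RWSEM_ACTIVE_MASK       0xffffffffL
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK - 1)

int main(void)
{
        /* lock just released, one reader queued: only the waiting bias is set */
        _Atomic long count = RWSEM_WAITING_BIAS;
        long adjustment = RWSEM_ACTIVE_BIAS;    /* grant one reader */

        /* fetch_add returns the old value; the kernel's rwsem_atomic_update()
         * returns the new one, hence the "- adjustment" on line 149 */
        long oldcount = atomic_fetch_add(&count, adjustment);

        if (oldcount & RWSEM_ACTIVE_MASK) {
                /* a writer grabbed the lock in the window before this wakeup:
                 * roll the grant back, as line 152 does */
                atomic_fetch_add(&count, -adjustment);
                puts("grant aborted, writer is active");
        } else {
                printf("reader granted, count=%ld\n", atomic_load(&count));
        }
        return 0;
}
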
214 struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) in rwsem_down_read_failed() argument
225 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_read_failed()
226 if (list_empty(&sem->wait_list)) in rwsem_down_read_failed()
228 list_add_tail(&waiter.list, &sem->wait_list); in rwsem_down_read_failed()
231 count = rwsem_atomic_update(adjustment, sem); in rwsem_down_read_failed()
241 sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); in rwsem_down_read_failed()
243 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_failed()
254 return sem; in rwsem_down_read_failed()
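
By the time line 231 runs, the failed reader has already added its read bias in the down_read() fast path, so the slow path starts from a negative adjustment and only raises the waiting bias if it is the first waiter. Reconstructed sketch of that arithmetic (the surrounding declarations are elided in the listing above):

        long adjustment = -RWSEM_ACTIVE_READ_BIAS;      /* no longer actively locking */

        if (list_empty(&sem->wait_list))                /* line 226 */
                adjustment += RWSEM_WAITING_BIAS;       /* first waiter sets the bias */
        list_add_tail(&waiter.list, &sem->wait_list);   /* line 228 */

        count = rwsem_atomic_update(adjustment, sem);   /* line 231 */
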
258 static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem) in rwsem_try_write_lock() argument
265 cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, in rwsem_try_write_lock()
267 if (!list_is_singular(&sem->wait_list)) in rwsem_try_write_lock()
268 rwsem_atomic_update(RWSEM_WAITING_BIAS, sem); in rwsem_try_write_lock()
269 rwsem_set_owner(sem); in rwsem_try_write_lock()
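
The cmpxchg on line 265 can only succeed when count == RWSEM_WAITING_BIAS, i.e. no active lockers and at least one queued waiter, and it installs RWSEM_ACTIVE_WRITE_BIAS in one shot. Because the write bias deliberately contains one copy of the waiting bias, line 268 adds RWSEM_WAITING_BIAS back when other waiters remain queued, so a later up_write() (which subtracts the full write bias) still leaves the count negative and triggers a wakeup. The constant relationships this relies on, as defined for 64-bit in the kernel's rwsem headers:

#define RWSEM_UNLOCKED_VALUE     0x00000000L
#define RWSEM_ACTIVE_BIAS        0x00000001L
#define RWSEM_ACTIVE_MASK        0xffffffffL
#define RWSEM_WAITING_BIAS       (-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
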
280 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) in rwsem_try_write_lock_unqueued() argument
282 long old, count = READ_ONCE(sem->count); in rwsem_try_write_lock_unqueued()
288 old = cmpxchg_acquire(&sem->count, count, in rwsem_try_write_lock_unqueued()
291 rwsem_set_owner(sem); in rwsem_try_write_lock_unqueued()
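
rwsem_try_write_lock_unqueued() is the same acquisition attempt driven from the optimistic-spin path: loop a compare-and-swap for as long as the count shows no active lockers (zero, or only the waiting bias). A runnable user-space model of that CAS loop, using C11 atomics in place of the kernel's cmpxchg_acquire() (illustrative only; assumes a 64-bit long):

#include <stdatomic.h>
#include <stdbool.h>

#define RWSEM_ACTIVE_MASK        0xffffffffL
#define RWSEM_ACTIVE_BIAS        0x00000001L
#define RWSEM_WAITING_BIAS       (-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static bool try_write_lock_unqueued(_Atomic long *count)
{
        long old = atomic_load_explicit(count, memory_order_relaxed);

        while (old == 0 || old == RWSEM_WAITING_BIAS) {
                /* on failure compare_exchange reloads 'old' for the retry,
                 * mirroring the "count = old" step at the end of the kernel loop */
                if (atomic_compare_exchange_weak_explicit(count, &old,
                                old + RWSEM_ACTIVE_WRITE_BIAS,
                                memory_order_acquire, memory_order_relaxed))
                        return true;    /* the kernel also records the owner here */
        }
        return false;   /* readers or a writer hold the lock: keep spinning or queue */
}
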
299 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) in rwsem_can_spin_on_owner() argument
308 owner = READ_ONCE(sem->owner); in rwsem_can_spin_on_owner()
310 long count = READ_ONCE(sem->count); in rwsem_can_spin_on_owner()
329 bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner) in rwsem_spin_on_owner() argument
334 while (sem->owner == owner) { in rwsem_spin_on_owner()
353 if (READ_ONCE(sem->owner)) in rwsem_spin_on_owner()
361 count = READ_ONCE(sem->count); in rwsem_spin_on_owner()
365 static bool rwsem_optimistic_spin(struct rw_semaphore *sem) in rwsem_optimistic_spin() argument
373 if (!rwsem_can_spin_on_owner(sem)) in rwsem_optimistic_spin()
376 if (!osq_lock(&sem->osq)) in rwsem_optimistic_spin()
380 owner = READ_ONCE(sem->owner); in rwsem_optimistic_spin()
381 if (owner && !rwsem_spin_on_owner(sem, owner)) in rwsem_optimistic_spin()
385 if (rwsem_try_write_lock_unqueued(sem)) { in rwsem_optimistic_spin()
407 osq_unlock(&sem->osq); in rwsem_optimistic_spin()
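
Lines 365-407 are the optimistic spin: take the per-semaphore OSQ so that only one writer at a time spins on sem->owner, spin while the owning writer is still running on a CPU, and keep retrying the unqueued lock grab in between. A condensed, reconstructed sketch of that control flow (not the verbatim function body; the bail-out condition is simplified):

        if (!rwsem_can_spin_on_owner(sem))      /* line 373 */
                return false;
        if (!osq_lock(&sem->osq))               /* line 376: one spinner at a time */
                return false;

        while (true) {
                owner = READ_ONCE(sem->owner);  /* line 380 */
                if (owner && !rwsem_spin_on_owner(sem, owner))
                        break;                  /* owner scheduled out: stop spinning */

                if (rwsem_try_write_lock_unqueued(sem)) {
                        taken = true;           /* line 385: got it without queueing */
                        break;
                }

                if (!owner && need_resched())   /* no visible owner and work pending */
                        break;

                cpu_relax_lowlatency();         /* spin politely between attempts */
        }
        osq_unlock(&sem->osq);                  /* line 407 */
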
416 static inline bool rwsem_has_spinner(struct rw_semaphore *sem) in rwsem_has_spinner() argument
418 return osq_is_locked(&sem->osq); in rwsem_has_spinner()
422 static bool rwsem_optimistic_spin(struct rw_semaphore *sem) in rwsem_optimistic_spin() argument
427 static inline bool rwsem_has_spinner(struct rw_semaphore *sem) in rwsem_has_spinner() argument
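
Lines 416-427 are the two halves of a CONFIG_RWSEM_SPIN_ON_OWNER split: with spinning enabled, rwsem_has_spinner() reports whether the OSQ is held; without it, both helpers collapse to stubs that always answer false. Sketch of the surrounding structure:

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return osq_is_locked(&sem->osq);        /* line 418 */
}
#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        return false;                           /* never spin: queue immediately */
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return false;
}
#endif
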
437 struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) in rwsem_down_write_failed() argument
444 count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem); in rwsem_down_write_failed()
447 if (rwsem_optimistic_spin(sem)) in rwsem_down_write_failed()
448 return sem; in rwsem_down_write_failed()
457 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_failed()
460 if (list_empty(&sem->wait_list)) in rwsem_down_write_failed()
463 list_add_tail(&waiter.list, &sem->wait_list); in rwsem_down_write_failed()
467 count = READ_ONCE(sem->count); in rwsem_down_write_failed()
475 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS); in rwsem_down_write_failed()
478 count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem); in rwsem_down_write_failed()
483 if (rwsem_try_write_lock(count, sem)) in rwsem_down_write_failed()
485 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_failed()
491 } while ((count = sem->count) & RWSEM_ACTIVE_MASK); in rwsem_down_write_failed()
493 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_failed()
498 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_failed()
500 return sem; in rwsem_down_write_failed()
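
A worked example of the count transitions along this path, using the 64-bit bias constants and starting from a lock held by two readers; runnable model (illustrative arithmetic only, no real locking):

#include <stdio.h>

#define RWSEM_ACTIVE_MASK        0xffffffffL    /* assumes 64-bit long */
#define RWSEM_ACTIVE_BIAS        0x00000001L
#define RWSEM_WAITING_BIAS       (-RWSEM_ACTIVE_MASK - 1)
#define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        long count = 2 * RWSEM_ACTIVE_BIAS;     /* two readers hold the lock */

        count += RWSEM_ACTIVE_WRITE_BIAS;       /* down_write() fast-path xadd */
        /* result != RWSEM_ACTIVE_WRITE_BIAS, so the writer enters the slow path */
        count -= RWSEM_ACTIVE_WRITE_BIAS;       /* line 444: stop active locking */
        count += RWSEM_WAITING_BIAS;            /* line 478: first waiter queued */

        printf("queued: active=%ld, waiters=%s\n",
               count & RWSEM_ACTIVE_MASK, count < 0 ? "yes" : "no");

        count -= 2 * RWSEM_ACTIVE_BIAS;         /* both readers call up_read() */
        printf("after up_read x2: active=%ld, the line 491 loop can exit\n",
               count & RWSEM_ACTIVE_MASK);
        return 0;
}
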
509 struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) in rwsem_wake() argument
533 if (rwsem_has_spinner(sem)) { in rwsem_wake()
539 if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags)) in rwsem_wake()
540 return sem; in rwsem_wake()
543 raw_spin_lock_irqsave(&sem->wait_lock, flags); in rwsem_wake()
547 if (!list_empty(&sem->wait_list)) in rwsem_wake()
548 sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); in rwsem_wake()
550 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); in rwsem_wake()
552 return sem; in rwsem_wake()
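
rwsem_wake() is reached from the unlock fast paths only when the updated count is negative, i.e. the waiting bias is set. The trylock at line 539 exists because, when an optimistic spinner is present (line 533), the spinner will usually take the lock and handle the handoff itself, so contending on wait_lock would be wasted work. A reconstructed sketch of the up_write() side that lands here, modeled on the generic xadd fast path (not copied from the listing above):

static inline void __up_write(struct rw_semaphore *sem)
{
        /* drop the full write bias; a negative result means the waiting
         * bias is still set, so a queued task needs waking */
        if (atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                                   (atomic_long_t *)&sem->count) < 0)
                rwsem_wake(sem);
}
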
562 struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) in rwsem_downgrade_wake() argument
566 raw_spin_lock_irqsave(&sem->wait_lock, flags); in rwsem_downgrade_wake()
569 if (!list_empty(&sem->wait_list)) in rwsem_downgrade_wake()
570 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); in rwsem_downgrade_wake()
572 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); in rwsem_downgrade_wake()
574 return sem; in rwsem_downgrade_wake()
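
rwsem_downgrade_wake() backs downgrade_write(): the holder turns its exclusive lock into a shared one without a full release, then wakes the readers queued behind it. RWSEM_WAKE_READ_OWNED signals that the lock is already reader-owned (by the caller), so only readers are granted. Typical usage sketch (hypothetical caller, standard API):

        down_write(&example_sem);
        /* ... exclusive phase: initialize the shared data ... */
        downgrade_write(&example_sem);  /* reaches rwsem_downgrade_wake() if anyone is queued */
        /* ... shared phase: queued readers now run concurrently with us ... */
        up_read(&example_sem);
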