Lines matching refs: sem

These are the lines that reference sem in the xadd-based reader-writer semaphore slow path; the functions and line numbers match kernel/locking/rwsem-xadd.c from kernels of roughly the v3.16-v4.6 era. On each entry, the leading number is the source line of the match, "in func()" names the enclosing function, and a trailing "argument" marks definitions where sem is a formal parameter.

73 void __init_rwsem(struct rw_semaphore *sem, const char *name,  in __init_rwsem()  argument
80 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); in __init_rwsem()
81 lockdep_init_map(&sem->dep_map, name, key, 0); in __init_rwsem()
83 sem->count = RWSEM_UNLOCKED_VALUE; in __init_rwsem()
84 raw_spin_lock_init(&sem->wait_lock); in __init_rwsem()
85 INIT_LIST_HEAD(&sem->wait_list); in __init_rwsem()
87 sem->owner = NULL; in __init_rwsem()
88 osq_lock_init(&sem->osq); in __init_rwsem()
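__init_rwsem() (lines 73-88) sets up all of the semaphore's state: the count word that encodes readers, writers and waiters in one long, the wait_lock spinlock guarding the FIFO wait_list, and, under CONFIG_RWSEM_SPIN_ON_OWNER, the owner pointer and OSQ node used for optimistic spinning. A minimal userspace model of that state, with C11 atomics and a pthread mutex standing in for the kernel primitives (the bias constants follow the usual 64-bit layout; this is a sketch, not the kernel's definitions):

    #include <pthread.h>
    #include <stdatomic.h>

    #define RWSEM_UNLOCKED_VALUE    0x00000000L
    #define RWSEM_ACTIVE_BIAS       0x00000001L
    #define RWSEM_ACTIVE_MASK       0xffffffffL
    #define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK - 1)
    #define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    struct list_head { struct list_head *next, *prev; };

    struct rwsem_model {
        atomic_long count;           /* readers/writer/waiter accounting */
        pthread_mutex_t wait_lock;   /* stands in for the raw spinlock */
        struct list_head wait_list;  /* FIFO of sleeping waiters */
        _Atomic(void *) owner;       /* writing task, read by spinners */
    };

    static void rwsem_model_init(struct rwsem_model *sem)
    {
        atomic_init(&sem->count, RWSEM_UNLOCKED_VALUE);
        pthread_mutex_init(&sem->wait_lock, NULL);
        sem->wait_list.next = sem->wait_list.prev = &sem->wait_list;
        atomic_init(&sem->owner, NULL);   /* no writer yet */
    }

The single count word is the heart of the design: the low 32 bits count active lockers, while the waiting bias drives the whole value negative whenever anyone is queued, so one atomic xadd can both take the lock and detect contention.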
122 __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type) in __rwsem_do_wake() argument
129 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); in __rwsem_do_wake()
149 oldcount = rwsem_atomic_update(adjustment, sem) - adjustment; in __rwsem_do_wake()
152 if (rwsem_atomic_update(-adjustment, sem) & in __rwsem_do_wake()
168 if (waiter->list.next == &sem->wait_list) in __rwsem_do_wake()
182 rwsem_atomic_add(adjustment, sem); in __rwsem_do_wake()
184 next = sem->wait_list.next; in __rwsem_do_wake()
203 sem->wait_list.next = next; in __rwsem_do_wake()
204 next->prev = &sem->wait_list; in __rwsem_do_wake()
207 return sem; in __rwsem_do_wake()
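__rwsem_do_wake() runs with wait_lock held and hands the lock to the waiter(s) at the head of the queue. The delicate part is lines 149-152: a reader grant is added to the count, and if the old value reveals that a writer stole the lock first, the grant is taken back, retrying only if that writer has since gone away. Lines 168-204 then batch every consecutive reader at the head of the queue so all of them are woken with one extra count adjustment (line 182) and one list splice (lines 203-204). A sketch of the grant/undo dance with C11 atomics; grant_readers() is an illustrative name, and note that the kernel's rwsem_atomic_update() returns the new value while C11 fetch_add returns the old one, hence the arithmetic:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RWSEM_ACTIVE_MASK   0xffffffffL
    #define RWSEM_WAITING_BIAS  (-RWSEM_ACTIVE_MASK - 1)

    /* Try to add a reader grant of 'adjustment' to the count.
     * Returns true if the grant stuck and the readers may be woken. */
    static bool grant_readers(atomic_long *count, long adjustment)
    {
        for (;;) {
            long oldcount = atomic_fetch_add(count, adjustment);

            if (oldcount >= RWSEM_WAITING_BIAS)
                return true;      /* no writer beat us; the grant holds */

            /* A writer stole the lock: undo our reader grant. */
            long newcount = atomic_fetch_add(count, -adjustment) - adjustment;
            if (newcount & RWSEM_ACTIVE_MASK)
                return false;     /* writer still active; it wakes later */

            /* The writer finished in between: retry the grant. */
        }
    }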
214 struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem) in rwsem_down_read_failed() argument
225 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_read_failed()
226 if (list_empty(&sem->wait_list)) in rwsem_down_read_failed()
228 list_add_tail(&waiter.list, &sem->wait_list); in rwsem_down_read_failed()
231 count = rwsem_atomic_update(adjustment, sem); in rwsem_down_read_failed()
241 sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); in rwsem_down_read_failed()
243 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_read_failed()
254 return sem; in rwsem_down_read_failed()
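rwsem_down_read_failed() is the slow path taken after the fast path's xadd already added RWSEM_ACTIVE_READ_BIAS but saw a negative result. The shape: queue ourselves under wait_lock (lines 225-228); atomically trade our active bias for a waiting bias, adding RWSEM_WAITING_BIAS only if we are the first waiter (231); wake the queue head if the resulting count shows no active lockers, or wake our own waiter if we are first in line behind active readers (241); then sleep until a waker hands us the lock. A compact pthread model of that final sleep, where granted stands in for the kernel clearing waiter->task (names illustrative throughout):

    #include <pthread.h>
    #include <stdbool.h>

    struct waiter_model {
        pthread_cond_t wake;   /* kernel: wake_up_process() on waiter->task */
        bool granted;          /* kernel: the waker clears waiter->task */
    };

    /* Sleep until a waker grants the lock. The kernel drops wait_lock
     * before its schedule() loop and re-checks waiter.task each pass;
     * a condition variable gives the same lost-wakeup-free sleep here. */
    static void wait_for_grant(struct waiter_model *w, pthread_mutex_t *lock)
    {
        pthread_mutex_lock(lock);
        while (!w->granted)
            pthread_cond_wait(&w->wake, lock);
        pthread_mutex_unlock(lock);
    }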
258 static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem) in rwsem_try_write_lock() argument
265 cmpxchg(&sem->count, RWSEM_WAITING_BIAS, in rwsem_try_write_lock()
267 if (!list_is_singular(&sem->wait_list)) in rwsem_try_write_lock()
268 rwsem_atomic_update(RWSEM_WAITING_BIAS, sem); in rwsem_try_write_lock()
269 rwsem_set_owner(sem); in rwsem_try_write_lock()
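rwsem_try_write_lock() is called with wait_lock held by a writer at the head of the queue. When the count says "waiters only, nothing active" (count == RWSEM_WAITING_BIAS), a single cmpxchg swings it to the active-write value (line 265); if other waiters remain queued behind us, the consumed waiting bias is added back (lines 267-268). A C11 rendering of the same trylock; try_write_lock() and more_waiters are illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RWSEM_ACTIVE_BIAS       0x00000001L
    #define RWSEM_ACTIVE_MASK       0xffffffffL
    #define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK - 1)
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    static bool try_write_lock(atomic_long *count, bool more_waiters)
    {
        long expected = RWSEM_WAITING_BIAS;

        /* Only possible when every active locker is gone. */
        if (!atomic_compare_exchange_strong(count, &expected,
                                            RWSEM_ACTIVE_WRITE_BIAS))
            return false;

        /* We consumed the waiting bias; restore it if the queue still
         * has entries behind us (kernel lines 267-268). */
        if (more_waiters)
            atomic_fetch_add(count, RWSEM_WAITING_BIAS);

        return true;    /* caller now records itself as owner (line 269) */
    }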
280 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) in rwsem_try_write_lock_unqueued() argument
282 long old, count = READ_ONCE(sem->count); in rwsem_try_write_lock_unqueued()
288 old = cmpxchg(&sem->count, count, count + RWSEM_ACTIVE_WRITE_BIAS); in rwsem_try_write_lock_unqueued()
290 rwsem_set_owner(sem); in rwsem_try_write_lock_unqueued()
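rwsem_try_write_lock_unqueued() is the optimistic spinner's trylock. It runs without wait_lock, so it loops on cmpxchg, and it may steal the lock whenever nothing is active, whether the count is 0 (uncontended) or RWSEM_WAITING_BIAS (sleepers queued but idle). A sketch:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RWSEM_ACTIVE_BIAS       0x00000001L
    #define RWSEM_ACTIVE_MASK       0xffffffffL
    #define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK - 1)
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    static bool try_write_lock_unqueued(atomic_long *count)
    {
        long c = atomic_load(count);

        for (;;) {
            /* Steal only when no reader or writer is active. */
            if (c != 0 && c != RWSEM_WAITING_BIAS)
                return false;

            /* On failure the CAS reloads c with the current value,
             * mirroring the kernel's 'count = old' retry. */
            if (atomic_compare_exchange_strong(count, &c,
                                               c + RWSEM_ACTIVE_WRITE_BIAS))
                return true;    /* caller records itself as owner */
        }
    }

This steal is what makes the xadd rwsem writer-unfair: a spinning writer can jump the queue ahead of sleeping waiters, which is also why __rwsem_do_wake() must cope with its reader grant being beaten (lines 149-152).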
298 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) in rwsem_can_spin_on_owner() argument
307 owner = READ_ONCE(sem->owner); in rwsem_can_spin_on_owner()
309 long count = READ_ONCE(sem->count); in rwsem_can_spin_on_owner()
328 bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner) in rwsem_spin_on_owner() argument
333 while (sem->owner == owner) { in rwsem_spin_on_owner()
352 if (READ_ONCE(sem->owner)) in rwsem_spin_on_owner()
360 count = READ_ONCE(sem->count); in rwsem_spin_on_owner()
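rwsem_can_spin_on_owner() is a cheap admission test: spinning pays off only if the current owner is itself running on a CPU, since a sleeping owner will not release the lock soon; the count read at line 309 covers the ownerless case, where the lock may be free or reader-held. rwsem_spin_on_owner() then busy-waits while the same task remains the owner (line 333), bailing out if the owner blocks or the spinner needs to reschedule, and finally checks owner and count (lines 352, 360) to decide whether more spinning is worthwhile. A cartoon of that loop; owner_is_running() and should_resched() are hypothetical stand-ins for the kernel's owner->on_cpu test and need_resched():

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RWSEM_ACTIVE_MASK   0xffffffffL
    #define RWSEM_WAITING_BIAS  (-RWSEM_ACTIVE_MASK - 1)

    extern bool owner_is_running(void *task);   /* kernel: owner->on_cpu */
    extern bool should_resched(void);           /* kernel: need_resched() */

    /* Spin while 'owner' still holds the lock. Returns true when another
     * trylock attempt is worthwhile, false when the spinner should queue. */
    static bool spin_on_owner(_Atomic(void *) *owner_field, void *owner,
                              atomic_long *count)
    {
        while (atomic_load(owner_field) == owner) {
            if (!owner_is_running(owner) || should_resched())
                return false;    /* owner blocked, or we must yield */
            /* kernel: cpu_relax(); plain busy-looping is fine in a sketch */
        }

        if (atomic_load(owner_field))
            return true;         /* a new writer took over: keep spinning */

        /* No owner: the lock is free or reader-held. Only the former is
         * worth spinning for, so check the count (kernel line 360). */
        long c = atomic_load(count);
        return c == 0 || c == RWSEM_WAITING_BIAS;
    }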
364 static bool rwsem_optimistic_spin(struct rw_semaphore *sem) in rwsem_optimistic_spin() argument
372 if (!rwsem_can_spin_on_owner(sem)) in rwsem_optimistic_spin()
375 if (!osq_lock(&sem->osq)) in rwsem_optimistic_spin()
379 owner = READ_ONCE(sem->owner); in rwsem_optimistic_spin()
380 if (owner && !rwsem_spin_on_owner(sem, owner)) in rwsem_optimistic_spin()
384 if (rwsem_try_write_lock_unqueued(sem)) { in rwsem_optimistic_spin()
406 osq_unlock(&sem->osq); in rwsem_optimistic_spin()
413 static bool rwsem_optimistic_spin(struct rw_semaphore *sem) in rwsem_optimistic_spin() argument
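rwsem_optimistic_spin() ties the spinning pieces together: acquire the OSQ node (line 375) so only one task at a time spins on the semaphore's cacheline, spin while the current owner runs (379-380), and keep retrying the unqueued trylock (384) until it succeeds or spinning stops paying. The second definition at line 413 is the stub compiled when CONFIG_RWSEM_SPIN_ON_OWNER is off; it simply returns false, so writers always take the sleeping slow path. The loop's shape, sketched with helpers from the earlier models (all names illustrative, declared extern so the unit stands alone):

    #include <stdatomic.h>
    #include <stdbool.h>

    extern bool osq_lock_model(void *osq);      /* one spinner at a time */
    extern void osq_unlock_model(void *osq);
    extern bool spin_on_owner(_Atomic(void *) *owner_field, void *owner,
                              atomic_long *count);
    extern bool try_write_lock_unqueued(atomic_long *count);
    extern bool should_resched(void);

    static bool optimistic_spin(atomic_long *count, _Atomic(void *) *owner,
                                void *osq, void *self)
    {
        bool taken = false;

        if (!osq_lock_model(osq))    /* someone else already spinning */
            return false;

        for (;;) {
            void *holder = atomic_load(owner);

            if (holder && !spin_on_owner(owner, holder, count))
                break;               /* holder went to sleep: give up */

            if (try_write_lock_unqueued(count)) {
                atomic_store(owner, self);    /* rwsem_set_owner() */
                taken = true;
                break;
            }

            if (should_resched())
                break;               /* don't spin past our timeslice */
        }

        osq_unlock_model(osq);
        return taken;
    }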
423 struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem) in rwsem_down_write_failed() argument
430 count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem); in rwsem_down_write_failed()
433 if (rwsem_optimistic_spin(sem)) in rwsem_down_write_failed()
434 return sem; in rwsem_down_write_failed()
443 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_failed()
446 if (list_empty(&sem->wait_list)) in rwsem_down_write_failed()
449 list_add_tail(&waiter.list, &sem->wait_list); in rwsem_down_write_failed()
453 count = READ_ONCE(sem->count); in rwsem_down_write_failed()
461 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS); in rwsem_down_write_failed()
464 count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem); in rwsem_down_write_failed()
469 if (rwsem_try_write_lock(count, sem)) in rwsem_down_write_failed()
471 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_failed()
477 } while ((count = sem->count) & RWSEM_ACTIVE_MASK); in rwsem_down_write_failed()
479 raw_spin_lock_irq(&sem->wait_lock); in rwsem_down_write_failed()
484 raw_spin_unlock_irq(&sem->wait_lock); in rwsem_down_write_failed()
486 return sem; in rwsem_down_write_failed()
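rwsem_down_write_failed() stitches everything together, and the order matters. First undo the fast path's write bias (line 430), then try optimistic spinning before committing to sleep (433-434), and only then queue. A queued writer that sees active readers ahead of it wakes them so they can drain (461), while a writer that is first in line claims the waiting bias itself (464). The final loop (469-479) alternates rwsem_try_write_lock() under wait_lock with sleeping until no active lockers remain. The skeleton, heavily compressed, with illustrative externs standing in for the queueing and sleeping machinery:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RWSEM_ACTIVE_BIAS       0x00000001L
    #define RWSEM_ACTIVE_MASK       0xffffffffL
    #define RWSEM_WAITING_BIAS      (-RWSEM_ACTIVE_MASK - 1)
    #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    extern bool optimistic_spin_model(atomic_long *count);
    extern bool queue_was_empty(void);           /* checked under wait_lock */
    extern void wake_queued_readers(void);       /* __rwsem_do_wake(READERS) */
    extern bool try_write_lock_model(atomic_long *count);
    extern void sleep_until_no_active_lockers(atomic_long *count);

    static void down_write_slowpath(atomic_long *count)
    {
        /* 1. Give back the fast path's write bias (line 430): we are
         *    waiting now, not actively locking. */
        long c = atomic_fetch_add(count, -RWSEM_ACTIVE_WRITE_BIAS)
                 - RWSEM_ACTIVE_WRITE_BIAS;

        /* 2. Spin before sleeping (lines 433-434). */
        if (optimistic_spin_model(count))
            return;

        /* 3. Queue under wait_lock (elided). The first waiter adds the
         *    waiting bias (line 464); a later one that sees active
         *    readers wakes them so they can drain (line 461). */
        if (queue_was_empty())
            atomic_fetch_add(count, RWSEM_WAITING_BIAS);
        else if (c > RWSEM_WAITING_BIAS)
            wake_queued_readers();

        /* 4. Alternate trylock and sleep until the lock is ours
         *    (lines 469-479). */
        while (!try_write_lock_model(count))
            sleep_until_no_active_lockers(count);
    }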
495 struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) in rwsem_wake() argument
499 raw_spin_lock_irqsave(&sem->wait_lock, flags); in rwsem_wake()
502 if (!list_empty(&sem->wait_list)) in rwsem_wake()
503 sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); in rwsem_wake()
505 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); in rwsem_wake()
507 return sem; in rwsem_wake()
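rwsem_wake() is what the unlock fast paths call when the count's sign bit reports queued waiters. It only takes wait_lock and delegates to __rwsem_do_wake() when the list is actually non-empty (line 502 re-checks, since the fast-path test races with queue changes). For context, the up_read fast path that triggers it looks roughly like the asm-generic variant below; up_read_model and rwsem_wake_model are illustrative names:

    #include <stdatomic.h>

    #define RWSEM_ACTIVE_READ_BIAS  0x00000001L
    #define RWSEM_ACTIVE_MASK       0xffffffffL

    extern void rwsem_wake_model(void);   /* the slow path above */

    static void up_read_model(atomic_long *count)
    {
        /* Drop our reader bias; fetch_add returns the old value. */
        long c = atomic_fetch_add(count, -RWSEM_ACTIVE_READ_BIAS)
                 - RWSEM_ACTIVE_READ_BIAS;

        /* A negative count means waiters are queued; if we were also
         * the last active locker, it is our job to wake the queue head. */
        if (c < -1 && (c & RWSEM_ACTIVE_MASK) == 0)
            rwsem_wake_model();
    }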
517 struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem) in rwsem_downgrade_wake() argument
521 raw_spin_lock_irqsave(&sem->wait_lock, flags); in rwsem_downgrade_wake()
524 if (!list_empty(&sem->wait_list)) in rwsem_downgrade_wake()
525 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); in rwsem_downgrade_wake()
527 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); in rwsem_downgrade_wake()
529 return sem; in rwsem_downgrade_wake()
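rwsem_downgrade_wake() backs downgrade_write(), which turns a held write lock into a read lock without ever releasing it: the fast path adds -RWSEM_WAITING_BIAS to the count, converting the write bias into a single read bias, and drops into this slow path when the result is negative, meaning waiters are queued. The wake type RWSEM_WAKE_READ_OWNED (line 525) tells __rwsem_do_wake() that the caller already holds read ownership, so the reader-grant dance of lines 149-152 can be skipped and the queued readers simply woken. A model of the fast-path half, after asm-generic's __downgrade_write(); names are illustrative:

    #include <stdatomic.h>

    #define RWSEM_ACTIVE_BIAS   0x00000001L
    #define RWSEM_ACTIVE_MASK   0xffffffffL
    #define RWSEM_WAITING_BIAS  (-RWSEM_ACTIVE_MASK - 1)

    extern void rwsem_downgrade_wake_model(void);   /* the slow path above */

    static void downgrade_write_model(atomic_long *count)
    {
        /* write bias = WAITING_BIAS + ACTIVE_BIAS and read bias =
         * ACTIVE_BIAS, so adding -WAITING_BIAS converts one into the
         * other in a single atomic step. */
        long c = atomic_fetch_add(count, -RWSEM_WAITING_BIAS)
                 - RWSEM_WAITING_BIAS;

        if (c < 0)    /* waiters queued: hand the readers the lock */
            rwsem_downgrade_wake_model();
    }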