Lines matching refs:lock (cross-references to the lock argument/field in the kernel mutex implementation, mutex.c)

50 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)  in __mutex_init()  argument
52 atomic_set(&lock->count, 1); in __mutex_init()
53 spin_lock_init(&lock->wait_lock); in __mutex_init()
54 INIT_LIST_HEAD(&lock->wait_list); in __mutex_init()
55 mutex_clear_owner(lock); in __mutex_init()
57 osq_lock_init(&lock->osq); in __mutex_init()
60 debug_mutex_init(lock, name, key); in __mutex_init()
95 void __sched mutex_lock(struct mutex *lock) in mutex_lock() argument
102 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath); in mutex_lock()
103 mutex_set_owner(lock); in mutex_lock()
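The __mutex_init() and mutex_lock() references above are the initialization and fast-path entry points of the kernel's sleeping mutex; mutex_init() is the macro that ends up in __mutex_init() together with a lockdep class key. A minimal usage sketch follows; the my_dev structure and helpers are invented for illustration:

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* Hypothetical driver state protected by a mutex. */
    struct my_dev {
        struct mutex io_lock;   /* serializes access to regs[] */
        u32 regs[16];
    };

    static struct my_dev *my_dev_alloc(void)
    {
        struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
            return NULL;

        /* mutex_init() expands to __mutex_init() plus a lockdep key. */
        mutex_init(&dev->io_lock);
        return dev;
    }

    static void my_dev_write(struct my_dev *dev, int idx, u32 val)
    {
        mutex_lock(&dev->io_lock);  /* may sleep; fast path is one atomic op on count */
        dev->regs[idx] = val;
        mutex_unlock(&dev->io_lock);
    }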
157 ww_mutex_set_context_fastpath(struct ww_mutex *lock, in ww_mutex_set_context_fastpath() argument
163 ww_mutex_lock_acquired(lock, ctx); in ww_mutex_set_context_fastpath()
165 lock->ctx = ctx; in ww_mutex_set_context_fastpath()
179 if (likely(atomic_read(&lock->base.count) == 0)) in ww_mutex_set_context_fastpath()
186 spin_lock_mutex(&lock->base.wait_lock, flags); in ww_mutex_set_context_fastpath()
187 list_for_each_entry(cur, &lock->base.wait_list, list) { in ww_mutex_set_context_fastpath()
188 debug_mutex_wake_waiter(&lock->base, cur); in ww_mutex_set_context_fastpath()
191 spin_unlock_mutex(&lock->base.wait_lock, flags); in ww_mutex_set_context_fastpath()
201 ww_mutex_set_context_slowpath(struct ww_mutex *lock, in ww_mutex_set_context_slowpath() argument
206 ww_mutex_lock_acquired(lock, ctx); in ww_mutex_set_context_slowpath()
207 lock->ctx = ctx; in ww_mutex_set_context_slowpath()
213 list_for_each_entry(cur, &lock->base.wait_list, list) { in ww_mutex_set_context_slowpath()
214 debug_mutex_wake_waiter(&lock->base, cur); in ww_mutex_set_context_slowpath()
225 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) in mutex_spin_on_owner() argument
230 while (lock->owner == owner) { in mutex_spin_on_owner()
254 static inline int mutex_can_spin_on_owner(struct mutex *lock) in mutex_can_spin_on_owner() argument
263 owner = READ_ONCE(lock->owner); in mutex_can_spin_on_owner()
277 static inline bool mutex_try_to_acquire(struct mutex *lock) in mutex_try_to_acquire() argument
279 return !mutex_is_locked(lock) && in mutex_try_to_acquire()
280 (atomic_cmpxchg(&lock->count, 1, 0) == 1); in mutex_try_to_acquire()
306 static bool mutex_optimistic_spin(struct mutex *lock, in mutex_optimistic_spin() argument
311 if (!mutex_can_spin_on_owner(lock)) in mutex_optimistic_spin()
319 if (!osq_lock(&lock->osq)) in mutex_optimistic_spin()
328 ww = container_of(lock, struct ww_mutex, base); in mutex_optimistic_spin()
345 owner = READ_ONCE(lock->owner); in mutex_optimistic_spin()
346 if (owner && !mutex_spin_on_owner(lock, owner)) in mutex_optimistic_spin()
350 if (mutex_try_to_acquire(lock)) { in mutex_optimistic_spin()
351 lock_acquired(&lock->dep_map, ip); in mutex_optimistic_spin()
355 ww = container_of(lock, struct ww_mutex, base); in mutex_optimistic_spin()
360 mutex_set_owner(lock); in mutex_optimistic_spin()
361 osq_unlock(&lock->osq); in mutex_optimistic_spin()
383 osq_unlock(&lock->osq); in mutex_optimistic_spin()
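Taken together, mutex_can_spin_on_owner(), mutex_spin_on_owner(), mutex_try_to_acquire() and the osq_lock()/osq_unlock() calls above implement optimistic spinning: a contender busy-waits while the current owner is running on a CPU and steals the lock with a cmpxchg instead of sleeping. The sketch below paraphrases that control flow; it is not the kernel source, and it drops the need_resched()/rt_task() bail-outs and the ww_mutex handling:

    /* Simplified paraphrase of mutex_optimistic_spin(), as referenced above. */
    static bool optimistic_spin_sketch(struct mutex *lock)
    {
        if (!mutex_can_spin_on_owner(lock))     /* owner not running? don't spin */
            return false;

        if (!osq_lock(&lock->osq))              /* only one spinner queues per lock */
            return false;

        for (;;) {
            struct task_struct *owner = READ_ONCE(lock->owner);

            /* Spin while the owner is still running on another CPU. */
            if (owner && !mutex_spin_on_owner(lock, owner))
                break;

            /* Owner gone (or lock never owned): try the 1 -> 0 cmpxchg. */
            if (mutex_try_to_acquire(lock)) {
                mutex_set_owner(lock);
                osq_unlock(&lock->osq);
                return true;
            }

            cpu_relax();    /* the real code also bails out on need_resched() */
        }

        osq_unlock(&lock->osq);
        return false;       /* fall back to the sleeping slow path */
    }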
402 static bool mutex_optimistic_spin(struct mutex *lock, in mutex_optimistic_spin() argument
423 void __sched mutex_unlock(struct mutex *lock) in mutex_unlock() argument
435 mutex_clear_owner(lock); in mutex_unlock()
437 __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath); in mutex_unlock()
453 void __sched ww_mutex_unlock(struct ww_mutex *lock) in ww_mutex_unlock() argument
459 if (lock->ctx) { in ww_mutex_unlock()
461 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); in ww_mutex_unlock()
463 if (lock->ctx->acquired > 0) in ww_mutex_unlock()
464 lock->ctx->acquired--; in ww_mutex_unlock()
465 lock->ctx = NULL; in ww_mutex_unlock()
474 mutex_clear_owner(&lock->base); in ww_mutex_unlock()
476 __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath); in ww_mutex_unlock()
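ww_mutex_unlock() and the ww_mutex_set_context_*() helpers above belong to the wound/wait ("ww") mutex API, which lets one acquire context take several mutexes of the same class in any order and back off with -EDEADLK when it would deadlock against an older context. A hedged usage sketch follows; struct buf, buf_ww_class and the buf_lock_pair()/buf_unlock_pair() helpers are invented, and each buf->lock is assumed to have been set up with ww_mutex_init(&buf->lock, &buf_ww_class):

    #include <linux/kernel.h>       /* swap() */
    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(buf_ww_class);

    struct buf {
        struct ww_mutex lock;
        /* ... payload ... */
    };

    /* Lock two buffers of the same ww class, backing off on -EDEADLK. */
    static int buf_lock_pair(struct buf *a, struct buf *b,
                             struct ww_acquire_ctx *ctx)
    {
        int ret;

        ww_acquire_init(ctx, &buf_ww_class);

        ret = ww_mutex_lock(&a->lock, ctx);
        if (ret == -EDEADLK)        /* possible with deadlock injection */
            ww_mutex_lock_slow(&a->lock, ctx);

        while (ww_mutex_lock(&b->lock, ctx) == -EDEADLK) {
            /*
             * An older context owns b: drop a, wait for b in slow
             * (always-succeeds) mode, then retry the other lock.
             * After swap(), 'a' is always the lock we hold.
             */
            ww_mutex_unlock(&a->lock);
            ww_mutex_lock_slow(&b->lock, ctx);
            swap(a, b);
        }

        ww_acquire_done(ctx);
        return 0;
    }

    static void buf_unlock_pair(struct buf *a, struct buf *b,
                                struct ww_acquire_ctx *ctx)
    {
        ww_mutex_unlock(&a->lock);
        ww_mutex_unlock(&b->lock);
        ww_acquire_fini(ctx);
    }

With the non-interruptible ww_mutex_lock() the only possible error is -EDEADLK, so the helper always succeeds eventually; the interruptible variant would additionally have to propagate -EINTR to its caller.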
481 __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock_check_stamp() argument
483 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); in __ww_mutex_lock_check_stamp()
505 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, in __mutex_lock_common() argument
515 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); in __mutex_lock_common()
521 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); in __mutex_lock_common()
523 if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) { in __mutex_lock_common()
529 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
535 if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1)) in __mutex_lock_common()
538 debug_mutex_lock_common(lock, &waiter); in __mutex_lock_common()
539 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); in __mutex_lock_common()
542 list_add_tail(&waiter.list, &lock->wait_list); in __mutex_lock_common()
545 lock_contended(&lock->dep_map, ip); in __mutex_lock_common()
558 if (atomic_read(&lock->count) >= 0 && in __mutex_lock_common()
559 (atomic_xchg(&lock->count, -1) == 1)) in __mutex_lock_common()
572 ret = __ww_mutex_lock_check_stamp(lock, ww_ctx); in __mutex_lock_common()
580 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
582 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
586 mutex_remove_waiter(lock, &waiter, current_thread_info()); in __mutex_lock_common()
588 if (likely(list_empty(&lock->wait_list))) in __mutex_lock_common()
589 atomic_set(&lock->count, 0); in __mutex_lock_common()
594 lock_acquired(&lock->dep_map, ip); in __mutex_lock_common()
595 mutex_set_owner(lock); in __mutex_lock_common()
598 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); in __mutex_lock_common()
602 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
607 mutex_remove_waiter(lock, &waiter, task_thread_info(task)); in __mutex_lock_common()
608 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
610 mutex_release(&lock->dep_map, 1, ip); in __mutex_lock_common()
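The __mutex_lock_common() references above are fragments of one wait loop: take wait_lock, queue a mutex_waiter, and repeatedly try to flip count to -1 ("locked, with waiters") before going back to sleep, bailing out on signals for the interruptible/killable variants. The sketch below condenses that flow; it is a paraphrase only and leaves out the lockdep annotations, debug hooks, the optimistic-spin attempt and the ww_mutex stamp check:

    static int lock_common_sketch(struct mutex *lock, long state)
    {
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;
        int ret = 0;

        spin_lock_mutex(&lock->wait_lock, flags);

        /* Last-second steal: 1 -> 0 while nobody is queued yet. */
        if (!mutex_is_locked(lock) && atomic_xchg(&lock->count, 0) == 1)
            goto skip_wait;

        waiter.task = task;
        list_add_tail(&waiter.list, &lock->wait_list);

        for (;;) {
            /*
             * Try to take the lock, leaving count at -1 so the
             * eventual unlock knows it has to wake a waiter.
             */
            if (atomic_read(&lock->count) >= 0 &&
                atomic_xchg(&lock->count, -1) == 1)
                break;

            if (signal_pending_state(state, task)) {
                ret = -EINTR;
                goto err;
            }

            __set_task_state(task, state);
            spin_unlock_mutex(&lock->wait_lock, flags);
            schedule_preempt_disabled();
            spin_lock_mutex(&lock->wait_lock, flags);
        }
        __set_task_state(task, TASK_RUNNING);

        mutex_remove_waiter(lock, &waiter, current_thread_info());
        if (list_empty(&lock->wait_list))   /* no waiters left */
            atomic_set(&lock->count, 0);

    skip_wait:
        mutex_set_owner(lock);
        spin_unlock_mutex(&lock->wait_lock, flags);
        return 0;

    err:
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        spin_unlock_mutex(&lock->wait_lock, flags);
        return ret;
    }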
617 mutex_lock_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_nested() argument
620 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, in mutex_lock_nested()
627 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) in _mutex_lock_nest_lock() argument
630 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, in _mutex_lock_nest_lock()
637 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_killable_nested() argument
640 return __mutex_lock_common(lock, TASK_KILLABLE, in mutex_lock_killable_nested()
646 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_interruptible_nested() argument
649 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, in mutex_lock_interruptible_nested()
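The _nested, _nest_lock, _killable and _interruptible wrappers above all funnel into __mutex_lock_common() with a different task state or lockdep annotation. mutex_lock_nested() exists purely for lockdep: when two mutexes that share a lock class are legitimately held at once, the inner acquisition is given an explicit subclass so lockdep does not flag a self-deadlock. A small sketch, with an invented node structure:

    #include <linux/mutex.h>

    struct node {
        struct mutex lock;      /* every node's lock shares one lockdep class */
        int value;
    };

    /* Lock parent then child; the child is the intentional nested level. */
    static void node_lock_pair(struct node *parent, struct node *child)
    {
        mutex_lock(&parent->lock);
        mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
    }

    static void node_unlock_pair(struct node *parent, struct node *child)
    {
        mutex_unlock(&child->lock);
        mutex_unlock(&parent->lock);
    }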
656 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_deadlock_injection() argument
670 ctx->contending_lock = lock; in ww_mutex_deadlock_injection()
672 ww_mutex_unlock(lock); in ww_mutex_deadlock_injection()
682 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock() argument
687 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, in __ww_mutex_lock()
690 return ww_mutex_deadlock_injection(lock, ctx); in __ww_mutex_lock()
697 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock_interruptible() argument
702 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, in __ww_mutex_lock_interruptible()
706 return ww_mutex_deadlock_injection(lock, ctx); in __ww_mutex_lock_interruptible()
718 __mutex_unlock_common_slowpath(struct mutex *lock, int nested) in __mutex_unlock_common_slowpath() argument
735 atomic_set(&lock->count, 1); in __mutex_unlock_common_slowpath()
737 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_unlock_common_slowpath()
738 mutex_release(&lock->dep_map, nested, _RET_IP_); in __mutex_unlock_common_slowpath()
739 debug_mutex_unlock(lock); in __mutex_unlock_common_slowpath()
741 if (!list_empty(&lock->wait_list)) { in __mutex_unlock_common_slowpath()
744 list_entry(lock->wait_list.next, in __mutex_unlock_common_slowpath()
747 debug_mutex_wake_waiter(lock, waiter); in __mutex_unlock_common_slowpath()
752 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_unlock_common_slowpath()
761 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_unlock_slowpath() local
763 __mutex_unlock_common_slowpath(lock, 1); in __mutex_unlock_slowpath()
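__mutex_unlock_common_slowpath() above is the other half of the wait-list protocol: release the lock word first, then wake the oldest waiter so it can retry the -1 xchg in __mutex_lock_common(). A condensed paraphrase, with the debug hooks and lockdep release omitted:

    static void unlock_slowpath_sketch(struct mutex *lock)
    {
        unsigned long flags;

        /* Make the lock available before taking wait_lock and waking anyone. */
        atomic_set(&lock->count, 1);

        spin_lock_mutex(&lock->wait_lock, flags);
        if (!list_empty(&lock->wait_list)) {
            struct mutex_waiter *waiter =
                list_entry(lock->wait_list.next, struct mutex_waiter, list);

            wake_up_process(waiter->task);  /* oldest waiter retries the xchg */
        }
        spin_unlock_mutex(&lock->wait_lock, flags);
    }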
772 __mutex_lock_killable_slowpath(struct mutex *lock);
775 __mutex_lock_interruptible_slowpath(struct mutex *lock);
788 int __sched mutex_lock_interruptible(struct mutex *lock) in mutex_lock_interruptible() argument
793 ret = __mutex_fastpath_lock_retval(&lock->count); in mutex_lock_interruptible()
795 mutex_set_owner(lock); in mutex_lock_interruptible()
798 return __mutex_lock_interruptible_slowpath(lock); in mutex_lock_interruptible()
803 int __sched mutex_lock_killable(struct mutex *lock) in mutex_lock_killable() argument
808 ret = __mutex_fastpath_lock_retval(&lock->count); in mutex_lock_killable()
810 mutex_set_owner(lock); in mutex_lock_killable()
813 return __mutex_lock_killable_slowpath(lock); in mutex_lock_killable()
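mutex_lock_interruptible() and mutex_lock_killable() above are the variants that give up the wait when a (fatal) signal arrives, returning -EINTR instead of taking the lock, so callers must check the return value, unlike with plain mutex_lock(). A small sketch; my_dev_op() and io_lock are illustrative:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static long my_dev_op(struct mutex *io_lock)
    {
        int ret;

        ret = mutex_lock_interruptible(io_lock);
        if (ret)
            return ret;     /* -EINTR: a signal arrived while we slept */

        /* ... critical section ... */

        mutex_unlock(io_lock);
        return 0;
    }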
820 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_lock_slowpath() local
822 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, in __mutex_lock_slowpath()
827 __mutex_lock_killable_slowpath(struct mutex *lock) in __mutex_lock_killable_slowpath() argument
829 return __mutex_lock_common(lock, TASK_KILLABLE, 0, in __mutex_lock_killable_slowpath()
834 __mutex_lock_interruptible_slowpath(struct mutex *lock) in __mutex_lock_interruptible_slowpath() argument
836 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, in __mutex_lock_interruptible_slowpath()
841 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock_slowpath() argument
843 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, in __ww_mutex_lock_slowpath()
848 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, in __ww_mutex_lock_interruptible_slowpath() argument
851 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, in __ww_mutex_lock_interruptible_slowpath()
863 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_trylock_slowpath() local
868 if (mutex_is_locked(lock)) in __mutex_trylock_slowpath()
871 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_trylock_slowpath()
873 prev = atomic_xchg(&lock->count, -1); in __mutex_trylock_slowpath()
875 mutex_set_owner(lock); in __mutex_trylock_slowpath()
876 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); in __mutex_trylock_slowpath()
880 if (likely(list_empty(&lock->wait_list))) in __mutex_trylock_slowpath()
881 atomic_set(&lock->count, 0); in __mutex_trylock_slowpath()
883 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_trylock_slowpath()
902 int __sched mutex_trylock(struct mutex *lock) in mutex_trylock() argument
906 ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath); in mutex_trylock()
908 mutex_set_owner(lock); in mutex_trylock()
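mutex_trylock() above never sleeps: it returns 1 if the lock was taken and 0 if it is already held (the spin_trylock() convention, the opposite of down_trylock()). A small sketch, with an invented opportunistic-flush helper:

    #include <linux/mutex.h>

    /* Opportunistic cleanup: skip the work instead of sleeping on contention. */
    static void try_flush(struct mutex *flush_lock, void (*flush)(void))
    {
        if (!mutex_trylock(flush_lock))     /* 1 on success, 0 if contended */
            return;

        flush();
        mutex_unlock(flush_lock);
    }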
916 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock() argument
922 ret = __mutex_fastpath_lock_retval(&lock->base.count); in __ww_mutex_lock()
925 ww_mutex_set_context_fastpath(lock, ctx); in __ww_mutex_lock()
926 mutex_set_owner(&lock->base); in __ww_mutex_lock()
928 ret = __ww_mutex_lock_slowpath(lock, ctx); in __ww_mutex_lock()
934 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock_interruptible() argument
940 ret = __mutex_fastpath_lock_retval(&lock->base.count); in __ww_mutex_lock_interruptible()
943 ww_mutex_set_context_fastpath(lock, ctx); in __ww_mutex_lock_interruptible()
944 mutex_set_owner(&lock->base); in __ww_mutex_lock_interruptible()
946 ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx); in __ww_mutex_lock_interruptible()
960 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) in atomic_dec_and_mutex_lock() argument
966 mutex_lock(lock); in atomic_dec_and_mutex_lock()
969 mutex_unlock(lock); in atomic_dec_and_mutex_lock()
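atomic_dec_and_mutex_lock() above is a small refcount helper: it decrements a counter and, only when the count reaches zero, returns true with the mutex held, so teardown of the protected object is serialized against other users. A usage sketch; struct session and session_put() are invented:

    #include <linux/atomic.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct session {
        atomic_t refs;
        struct mutex teardown_lock;
        /* ... */
    };

    static void session_put(struct session *s)
    {
        /* True (with teardown_lock held) only for the final reference. */
        if (!atomic_dec_and_mutex_lock(&s->refs, &s->teardown_lock))
            return;

        /* Last reference: tear down under the lock, then free. */
        mutex_unlock(&s->teardown_lock);
        kfree(s);
    }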