Lines matching refs:lock (Linux kernel mutex implementation, kernel/locking/mutex.c).
Each entry gives the original source line number, the matching code, and the enclosing function; lines that declare lock are additionally tagged as argument or local.

50 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)  in __mutex_init()  argument
52 atomic_set(&lock->count, 1); in __mutex_init()
53 spin_lock_init(&lock->wait_lock); in __mutex_init()
54 INIT_LIST_HEAD(&lock->wait_list); in __mutex_init()
55 mutex_clear_owner(lock); in __mutex_init()
57 osq_lock_init(&lock->osq); in __mutex_init()
60 debug_mutex_init(lock, name, key); in __mutex_init()
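For reference, a minimal initialization sketch against the API shown above; the struct, variable, and function names below are hypothetical, not part of mutex.c. A mutex is either declared statically with DEFINE_MUTEX() or set up at runtime with mutex_init(), which ends up in the __mutex_init() listed above.

    #include <linux/mutex.h>

    static DEFINE_MUTEX(global_lock);       /* static initialization */

    struct device_state {
            struct mutex lock;
            int open_count;
    };

    static void device_state_setup(struct device_state *st)
    {
            mutex_init(&st->lock);           /* runtime init, reaches __mutex_init() */
            st->open_count = 0;
    }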
95 void __sched mutex_lock(struct mutex *lock) in mutex_lock() argument
102 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath); in mutex_lock()
103 mutex_set_owner(lock); in mutex_lock()
157 ww_mutex_set_context_fastpath(struct ww_mutex *lock, in ww_mutex_set_context_fastpath() argument
163 ww_mutex_lock_acquired(lock, ctx); in ww_mutex_set_context_fastpath()
165 lock->ctx = ctx; in ww_mutex_set_context_fastpath()
179 if (likely(atomic_read(&lock->base.count) == 0)) in ww_mutex_set_context_fastpath()
186 spin_lock_mutex(&lock->base.wait_lock, flags); in ww_mutex_set_context_fastpath()
187 list_for_each_entry(cur, &lock->base.wait_list, list) { in ww_mutex_set_context_fastpath()
188 debug_mutex_wake_waiter(&lock->base, cur); in ww_mutex_set_context_fastpath()
191 spin_unlock_mutex(&lock->base.wait_lock, flags); in ww_mutex_set_context_fastpath()
201 ww_mutex_set_context_slowpath(struct ww_mutex *lock, in ww_mutex_set_context_slowpath() argument
206 ww_mutex_lock_acquired(lock, ctx); in ww_mutex_set_context_slowpath()
207 lock->ctx = ctx; in ww_mutex_set_context_slowpath()
213 list_for_each_entry(cur, &lock->base.wait_list, list) { in ww_mutex_set_context_slowpath()
214 debug_mutex_wake_waiter(&lock->base, cur); in ww_mutex_set_context_slowpath()
225 bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner) in mutex_spin_on_owner() argument
230 while (lock->owner == owner) { in mutex_spin_on_owner()
254 static inline int mutex_can_spin_on_owner(struct mutex *lock) in mutex_can_spin_on_owner() argument
263 owner = READ_ONCE(lock->owner); in mutex_can_spin_on_owner()
277 static inline bool mutex_try_to_acquire(struct mutex *lock) in mutex_try_to_acquire() argument
279 return !mutex_is_locked(lock) && in mutex_try_to_acquire()
280 (atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1); in mutex_try_to_acquire()
306 static bool mutex_optimistic_spin(struct mutex *lock, in mutex_optimistic_spin() argument
311 if (!mutex_can_spin_on_owner(lock)) in mutex_optimistic_spin()
319 if (!osq_lock(&lock->osq)) in mutex_optimistic_spin()
328 ww = container_of(lock, struct ww_mutex, base); in mutex_optimistic_spin()
345 owner = READ_ONCE(lock->owner); in mutex_optimistic_spin()
346 if (owner && !mutex_spin_on_owner(lock, owner)) in mutex_optimistic_spin()
350 if (mutex_try_to_acquire(lock)) { in mutex_optimistic_spin()
351 lock_acquired(&lock->dep_map, ip); in mutex_optimistic_spin()
355 ww = container_of(lock, struct ww_mutex, base); in mutex_optimistic_spin()
360 mutex_set_owner(lock); in mutex_optimistic_spin()
361 osq_unlock(&lock->osq); in mutex_optimistic_spin()
383 osq_unlock(&lock->osq); in mutex_optimistic_spin()
402 static bool mutex_optimistic_spin(struct mutex *lock, in mutex_optimistic_spin() argument
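The spinning code above is internal and depends on scheduler state (the owner's on_cpu flag, need_resched(), the OSQ queue). As a rough illustration of the idea only, here is a self-contained userspace sketch in C11: try a compare-and-exchange on a count that follows the same convention as the listing (1 unlocked, 0 locked, negative when waiters are queued) for a bounded number of spins, then fall back to a blocking path. Every name here (toy_mutex, toy_try_acquire, toy_optimistic_spin) is hypothetical; this is not the kernel's implementation.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct toy_mutex {
            atomic_int count;   /* 1: unlocked, 0: locked, <0: locked with waiters */
    };

    /* Rough analogue of mutex_try_to_acquire(): only attempt the cmpxchg when
     * the lock looks free, to avoid hammering the cache line. */
    static bool toy_try_acquire(struct toy_mutex *m)
    {
            int expected = 1;
            return atomic_load(&m->count) == 1 &&
                   atomic_compare_exchange_strong(&m->count, &expected, 0);
    }

    /* Bounded stand-in for mutex_optimistic_spin(): the real code spins only
     * while the current owner is running on a CPU and this task does not need
     * to reschedule; a fixed budget approximates that here. */
    static bool toy_optimistic_spin(struct toy_mutex *m, int budget)
    {
            while (budget-- > 0) {
                    if (toy_try_acquire(m))
                            return true;    /* got the lock without sleeping */
            }
            return false;                   /* caller falls back to a blocking slowpath */
    }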
423 void __sched mutex_unlock(struct mutex *lock) in mutex_unlock() argument
435 mutex_clear_owner(lock); in mutex_unlock()
437 __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath); in mutex_unlock()
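A minimal usage sketch for the mutex_lock()/mutex_unlock() pair above; the lock and counter names are hypothetical. Both calls may sleep, so this pattern belongs in process context only.

    #include <linux/mutex.h>

    static DEFINE_MUTEX(counter_lock);
    static unsigned long counter;

    static void bump_counter(void)
    {
            mutex_lock(&counter_lock);      /* fastpath claims lock->count, slowpath otherwise */
            counter++;
            mutex_unlock(&counter_lock);    /* fastpath releases, slowpath wakes a waiter */
    }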
453 void __sched ww_mutex_unlock(struct ww_mutex *lock) in ww_mutex_unlock() argument
459 if (lock->ctx) { in ww_mutex_unlock()
461 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); in ww_mutex_unlock()
463 if (lock->ctx->acquired > 0) in ww_mutex_unlock()
464 lock->ctx->acquired--; in ww_mutex_unlock()
465 lock->ctx = NULL; in ww_mutex_unlock()
474 mutex_clear_owner(&lock->base); in ww_mutex_unlock()
476 __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath); in ww_mutex_unlock()
481 __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock_check_stamp() argument
483 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); in __ww_mutex_lock_check_stamp()
508 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, in __mutex_lock_common() argument
518 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); in __mutex_lock_common()
520 if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) { in __mutex_lock_common()
526 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
532 if (!mutex_is_locked(lock) && in __mutex_lock_common()
533 (atomic_xchg_acquire(&lock->count, 0) == 1)) in __mutex_lock_common()
536 debug_mutex_lock_common(lock, &waiter); in __mutex_lock_common()
537 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); in __mutex_lock_common()
540 list_add_tail(&waiter.list, &lock->wait_list); in __mutex_lock_common()
543 lock_contended(&lock->dep_map, ip); in __mutex_lock_common()
556 if (atomic_read(&lock->count) >= 0 && in __mutex_lock_common()
557 (atomic_xchg_acquire(&lock->count, -1) == 1)) in __mutex_lock_common()
570 ret = __ww_mutex_lock_check_stamp(lock, ww_ctx); in __mutex_lock_common()
578 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
580 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
584 mutex_remove_waiter(lock, &waiter, current_thread_info()); in __mutex_lock_common()
586 if (likely(list_empty(&lock->wait_list))) in __mutex_lock_common()
587 atomic_set(&lock->count, 0); in __mutex_lock_common()
592 lock_acquired(&lock->dep_map, ip); in __mutex_lock_common()
593 mutex_set_owner(lock); in __mutex_lock_common()
596 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); in __mutex_lock_common()
600 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
605 mutex_remove_waiter(lock, &waiter, task_thread_info(task)); in __mutex_lock_common()
606 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_lock_common()
608 mutex_release(&lock->dep_map, 1, ip); in __mutex_lock_common()
615 mutex_lock_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_nested() argument
618 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, in mutex_lock_nested()
625 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) in _mutex_lock_nest_lock() argument
628 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, in _mutex_lock_nest_lock()
635 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_killable_nested() argument
638 return __mutex_lock_common(lock, TASK_KILLABLE, in mutex_lock_killable_nested()
644 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) in mutex_lock_interruptible_nested() argument
647 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, in mutex_lock_interruptible_nested()
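The _nested variants above only matter when lockdep is enabled: they tell the validator that taking two locks of the same class is intentional. A hedged sketch of the usual pattern, with hypothetical struct and function names, ordering the locks by address and annotating the inner one with SINGLE_DEPTH_NESTING:

    #include <linux/mutex.h>

    struct account {
            struct mutex lock;
            long balance;
    };

    static void transfer(struct account *from, struct account *to, long amount)
    {
            /* Impose a stable locking order so the nesting cannot deadlock. */
            struct account *first  = from < to ? from : to;
            struct account *second = from < to ? to : from;

            mutex_lock(&first->lock);
            mutex_lock_nested(&second->lock, SINGLE_DEPTH_NESTING);

            from->balance -= amount;
            to->balance   += amount;

            mutex_unlock(&second->lock);
            mutex_unlock(&first->lock);
    }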
654 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in ww_mutex_deadlock_injection() argument
668 ctx->contending_lock = lock; in ww_mutex_deadlock_injection()
670 ww_mutex_unlock(lock); in ww_mutex_deadlock_injection()
680 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock() argument
685 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, in __ww_mutex_lock()
688 return ww_mutex_deadlock_injection(lock, ctx); in __ww_mutex_lock()
695 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock_interruptible() argument
700 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, in __ww_mutex_lock_interruptible()
704 return ww_mutex_deadlock_injection(lock, ctx); in __ww_mutex_lock_interruptible()
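The wound/wait entry points above are normally reached through the ww_mutex API rather than called directly. A hedged sketch of the documented acquire/back-off pattern (struct buf, my_ww_class, and lock_pair() are made-up names): take both objects under one acquire context, and when ww_mutex_lock() returns -EDEADLK, drop what is held, sleep on the contended lock with ww_mutex_lock_slow(), and retry around it. With CONFIG_DEBUG_WW_MUTEX_SLOWPATH, the injection helper listed above can make even the first lock fail, so that case is handled too.

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(my_ww_class);

    struct buf {
            struct ww_mutex lock;
            void *data;
    };

    static int lock_pair(struct buf *a, struct buf *b, struct ww_acquire_ctx *ctx)
    {
            struct buf *contended = NULL;
            int ret;

            ww_acquire_init(ctx, &my_ww_class);
    retry:
            if (contended != a) {
                    ret = ww_mutex_lock(&a->lock, ctx);
                    if (ret)
                            goto err_a;
            }
            if (contended != b) {
                    ret = ww_mutex_lock(&b->lock, ctx);
                    if (ret)
                            goto err_b;
            }

            ww_acquire_done(ctx);
            return 0;   /* both held; caller unlocks both and calls ww_acquire_fini(ctx) */

    err_b:
            ww_mutex_unlock(&a->lock);
            if (ret == -EDEADLK) {
                    /* Lost the stamp race: sleep on the contended lock, keep it, retry. */
                    ww_mutex_lock_slow(&b->lock, ctx);
                    contended = b;
                    goto retry;
            }
            goto out_fini;
    err_a:
            if (contended == b)
                    ww_mutex_unlock(&b->lock);
            if (ret == -EDEADLK) {
                    ww_mutex_lock_slow(&a->lock, ctx);
                    contended = a;
                    goto retry;
            }
    out_fini:
            ww_acquire_fini(ctx);
            return ret;
    }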
716 __mutex_unlock_common_slowpath(struct mutex *lock, int nested) in __mutex_unlock_common_slowpath() argument
733 atomic_set(&lock->count, 1); in __mutex_unlock_common_slowpath()
735 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_unlock_common_slowpath()
736 mutex_release(&lock->dep_map, nested, _RET_IP_); in __mutex_unlock_common_slowpath()
737 debug_mutex_unlock(lock); in __mutex_unlock_common_slowpath()
739 if (!list_empty(&lock->wait_list)) { in __mutex_unlock_common_slowpath()
742 list_entry(lock->wait_list.next, in __mutex_unlock_common_slowpath()
745 debug_mutex_wake_waiter(lock, waiter); in __mutex_unlock_common_slowpath()
750 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_unlock_common_slowpath()
759 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_unlock_slowpath() local
761 __mutex_unlock_common_slowpath(lock, 1); in __mutex_unlock_slowpath()
770 __mutex_lock_killable_slowpath(struct mutex *lock);
773 __mutex_lock_interruptible_slowpath(struct mutex *lock);
786 int __sched mutex_lock_interruptible(struct mutex *lock) in mutex_lock_interruptible() argument
791 ret = __mutex_fastpath_lock_retval(&lock->count); in mutex_lock_interruptible()
793 mutex_set_owner(lock); in mutex_lock_interruptible()
796 return __mutex_lock_interruptible_slowpath(lock); in mutex_lock_interruptible()
801 int __sched mutex_lock_killable(struct mutex *lock) in mutex_lock_killable() argument
806 ret = __mutex_fastpath_lock_retval(&lock->count); in mutex_lock_killable()
808 mutex_set_owner(lock); in mutex_lock_killable()
811 return __mutex_lock_killable_slowpath(lock); in mutex_lock_killable()
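Both mutex_lock_interruptible() and mutex_lock_killable() return 0 on success and -EINTR when the sleep is broken by a signal (any signal versus only fatal ones, respectively), so the return value must be checked. A hedged sketch with hypothetical names:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(cfg_lock);
    static int cfg_value;

    static int set_cfg_value(int val)
    {
            if (mutex_lock_interruptible(&cfg_lock))
                    return -ERESTARTSYS;    /* a signal arrived while waiting for the lock */

            cfg_value = val;
            mutex_unlock(&cfg_lock);
            return 0;
    }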
818 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_lock_slowpath() local
820 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, in __mutex_lock_slowpath()
825 __mutex_lock_killable_slowpath(struct mutex *lock) in __mutex_lock_killable_slowpath() argument
827 return __mutex_lock_common(lock, TASK_KILLABLE, 0, in __mutex_lock_killable_slowpath()
832 __mutex_lock_interruptible_slowpath(struct mutex *lock) in __mutex_lock_interruptible_slowpath() argument
834 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, in __mutex_lock_interruptible_slowpath()
839 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock_slowpath() argument
841 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0, in __ww_mutex_lock_slowpath()
846 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, in __ww_mutex_lock_interruptible_slowpath() argument
849 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0, in __ww_mutex_lock_interruptible_slowpath()
861 struct mutex *lock = container_of(lock_count, struct mutex, count); in __mutex_trylock_slowpath() local
866 if (mutex_is_locked(lock)) in __mutex_trylock_slowpath()
869 spin_lock_mutex(&lock->wait_lock, flags); in __mutex_trylock_slowpath()
871 prev = atomic_xchg_acquire(&lock->count, -1); in __mutex_trylock_slowpath()
873 mutex_set_owner(lock); in __mutex_trylock_slowpath()
874 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); in __mutex_trylock_slowpath()
878 if (likely(list_empty(&lock->wait_list))) in __mutex_trylock_slowpath()
879 atomic_set(&lock->count, 0); in __mutex_trylock_slowpath()
881 spin_unlock_mutex(&lock->wait_lock, flags); in __mutex_trylock_slowpath()
900 int __sched mutex_trylock(struct mutex *lock) in mutex_trylock() argument
904 ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath); in mutex_trylock()
906 mutex_set_owner(lock); in mutex_trylock()
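mutex_trylock() returns 1 when the lock was acquired and 0 when it is already held, and it never sleeps. A hedged sketch (scan_lock and try_run_scan() are hypothetical):

    #include <linux/mutex.h>
    #include <linux/types.h>

    static DEFINE_MUTEX(scan_lock);

    static bool try_run_scan(void)
    {
            if (!mutex_trylock(&scan_lock))
                    return false;           /* someone else holds the lock; skip the work */

            /* ... do the scan under the lock ... */

            mutex_unlock(&scan_lock);
            return true;
    }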
914 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock() argument
920 ret = __mutex_fastpath_lock_retval(&lock->base.count); in __ww_mutex_lock()
923 ww_mutex_set_context_fastpath(lock, ctx); in __ww_mutex_lock()
924 mutex_set_owner(&lock->base); in __ww_mutex_lock()
926 ret = __ww_mutex_lock_slowpath(lock, ctx); in __ww_mutex_lock()
932 __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) in __ww_mutex_lock_interruptible() argument
938 ret = __mutex_fastpath_lock_retval(&lock->base.count); in __ww_mutex_lock_interruptible()
941 ww_mutex_set_context_fastpath(lock, ctx); in __ww_mutex_lock_interruptible()
942 mutex_set_owner(&lock->base); in __ww_mutex_lock_interruptible()
944 ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx); in __ww_mutex_lock_interruptible()
958 int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) in atomic_dec_and_mutex_lock() argument
964 mutex_lock(lock); in atomic_dec_and_mutex_lock()
967 mutex_unlock(lock); in atomic_dec_and_mutex_lock()
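atomic_dec_and_mutex_lock() returns true only when the count dropped to zero, in which case it returns with the mutex held; the usual pattern is last-reference teardown under the lock. A hedged sketch with made-up names:

    #include <linux/atomic.h>
    #include <linux/mutex.h>

    static atomic_t users = ATOMIC_INIT(1);
    static DEFINE_MUTEX(registry_lock);

    static void put_user_slot(void)
    {
            /* True only for the final reference; the mutex is held on return. */
            if (atomic_dec_and_mutex_lock(&users, &registry_lock)) {
                    /* ... tear down state that registry_lock protects ... */
                    mutex_unlock(&registry_lock);
            }
    }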