Lines Matching refs:lock (kernel/locking/rtmutex.c)

50 rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)  in rt_mutex_set_owner()  argument
54 if (rt_mutex_has_waiters(lock)) in rt_mutex_set_owner()
57 lock->owner = (struct task_struct *)val; in rt_mutex_set_owner()
60 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) in clear_rt_mutex_waiters() argument
62 lock->owner = (struct task_struct *) in clear_rt_mutex_waiters()
63 ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS); in clear_rt_mutex_waiters()
66 static void fixup_rt_mutex_waiters(struct rt_mutex *lock) in fixup_rt_mutex_waiters() argument
68 if (!rt_mutex_has_waiters(lock)) in fixup_rt_mutex_waiters()
69 clear_rt_mutex_waiters(lock); in fixup_rt_mutex_waiters()
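
The three helpers above share one encoding trick: struct task_struct pointers are word-aligned, so the lowest bit of lock->owner is free to carry RT_MUTEX_HAS_WAITERS. rt_mutex_set_owner() ors the bit in when waiters are queued, clear_rt_mutex_waiters() masks it off, and fixup_rt_mutex_waiters() clears a stale bit once the last waiter is gone. A minimal userspace sketch of the same tagging (hypothetical names, not the kernel code):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HAS_WAITERS 1UL              /* stands in for RT_MUTEX_HAS_WAITERS */

    struct task { int pid; };

    /* Pack an owner pointer plus the waiters flag into one word. */
    static uintptr_t pack_owner(struct task *owner, int has_waiters)
    {
            /* Alignment guarantees the low bit of a task pointer is zero. */
            assert(((uintptr_t)owner & HAS_WAITERS) == 0);
            return (uintptr_t)owner | (has_waiters ? HAS_WAITERS : 0);
    }

    static struct task *unpack_owner(uintptr_t word)
    {
            return (struct task *)(word & ~HAS_WAITERS);
    }

    int main(void)
    {
            struct task t = { .pid = 42 };
            uintptr_t word = pack_owner(&t, 1);

            printf("owner pid=%d, waiters=%lu\n",
                   unpack_owner(word)->pid,
                   (unsigned long)(word & HAS_WAITERS));
            return 0;
    }
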
78 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) in mark_rt_mutex_waiters() argument
80 unsigned long owner, *p = (unsigned long *) &lock->owner; in mark_rt_mutex_waiters()
93 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) in unlock_rt_mutex_safe() argument
94 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
96 struct task_struct *owner = rt_mutex_owner(lock); in unlock_rt_mutex_safe()
98 clear_rt_mutex_waiters(lock); in unlock_rt_mutex_safe()
99 raw_spin_unlock(&lock->wait_lock); in unlock_rt_mutex_safe()
124 return rt_mutex_cmpxchg(lock, owner, NULL); in unlock_rt_mutex_safe()
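
unlock_rt_mutex_safe() handles the race between dropping wait_lock and clearing the owner: it clears the waiters bit under wait_lock, releases the spinlock, then tries a cmpxchg from the bare owner pointer to NULL. If a new waiter set the bit in between, the cmpxchg fails and the caller (the loop at line 1298 in rt_mutex_slowunlock()) retries. A sketch of that failing-cmpxchg check with C11 atomics (hypothetical helper, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Release owner_word only if it still holds exactly `me` with no
     * waiters bit; a concurrent mark_rt_mutex_waiters()-style update
     * makes this fail, telling the caller to take the slow path. */
    static bool release_if_unchanged(_Atomic uintptr_t *owner_word, uintptr_t me)
    {
            uintptr_t expected = me;

            return atomic_compare_exchange_strong(owner_word, &expected, 0);
    }

    int main(void)
    {
            _Atomic uintptr_t word = 0x1000;   /* pretend 0x1000 is our task */

            return release_if_unchanged(&word, 0x1000) ? 0 : 1;
    }
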
129 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock) in mark_rt_mutex_waiters() argument
131 lock->owner = (struct task_struct *) in mark_rt_mutex_waiters()
132 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS); in mark_rt_mutex_waiters()
138 static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) in unlock_rt_mutex_safe() argument
139 __releases(lock->wait_lock) in unlock_rt_mutex_safe()
141 lock->owner = NULL; in unlock_rt_mutex_safe()
142 raw_spin_unlock(&lock->wait_lock); in unlock_rt_mutex_safe()
167 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue() argument
169 struct rb_node **link = &lock->waiters.rb_node; in rt_mutex_enqueue()
186 lock->waiters_leftmost = &waiter->tree_entry; in rt_mutex_enqueue()
189 rb_insert_color(&waiter->tree_entry, &lock->waiters); in rt_mutex_enqueue()
193 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue() argument
198 if (lock->waiters_leftmost == &waiter->tree_entry) in rt_mutex_dequeue()
199 lock->waiters_leftmost = rb_next(&waiter->tree_entry); in rt_mutex_dequeue()
201 rb_erase(&waiter->tree_entry, &lock->waiters); in rt_mutex_dequeue()
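
rt_mutex_enqueue()/rt_mutex_dequeue() keep waiters in a priority-ordered rbtree and cache the leftmost node in waiters_leftmost, so the top (highest-priority) waiter is found in O(1); dequeue moves the cache with rb_next() (lines 198-199) before erasing. A simplified sketch of the same cached-top idea using a sorted list instead of an rbtree (hypothetical types):

    #include <stddef.h>

    struct waiter {
            int prio;                  /* lower value = higher priority */
            struct waiter *next;
    };

    struct lockq {
            struct waiter *head;       /* kept sorted by prio */
            struct waiter *top;        /* cached, like waiters_leftmost */
    };

    static void enqueue(struct lockq *q, struct waiter *w)
    {
            struct waiter **link = &q->head;

            while (*link && (*link)->prio <= w->prio)
                    link = &(*link)->next;
            w->next = *link;
            *link = w;
            q->top = q->head;          /* head is always the top waiter here */
    }

    static void dequeue(struct lockq *q, struct waiter *w)
    {
            struct waiter **link = &q->head;

            while (*link && *link != w)
                    link = &(*link)->next;
            if (*link)
                    *link = w->next;
            if (q->top == w)           /* mirror the rb_next() fixup at line 199 */
                    q->top = q->head;
    }

    int main(void)
    {
            struct waiter a = { .prio = 2 }, b = { .prio = 1 };
            struct lockq q = { 0 };

            enqueue(&q, &a);
            enqueue(&q, &b);           /* b outranks a, becomes top */
            dequeue(&q, &b);           /* top falls back to a */
            return q.top == &a ? 0 : 1;
    }
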
345 return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; in task_blocked_on_lock()
421 struct rt_mutex *lock; in rt_mutex_adjust_prio_chain() local
501 if (next_lock != waiter->lock) in rt_mutex_adjust_prio_chain()
543 lock = waiter->lock; in rt_mutex_adjust_prio_chain()
549 if (!raw_spin_trylock(&lock->wait_lock)) { in rt_mutex_adjust_prio_chain()
564 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { in rt_mutex_adjust_prio_chain()
565 debug_rt_mutex_deadlock(chwalk, orig_waiter, lock); in rt_mutex_adjust_prio_chain()
566 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
588 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
589 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
594 task = rt_mutex_owner(lock); in rt_mutex_adjust_prio_chain()
608 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
612 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
625 prerequeue_top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
628 rt_mutex_dequeue(lock, waiter); in rt_mutex_adjust_prio_chain()
630 rt_mutex_enqueue(lock, waiter); in rt_mutex_adjust_prio_chain()
643 if (!rt_mutex_owner(lock)) { in rt_mutex_adjust_prio_chain()
649 if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) in rt_mutex_adjust_prio_chain()
650 wake_up_process(rt_mutex_top_waiter(lock)->task); in rt_mutex_adjust_prio_chain()
651 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
656 task = rt_mutex_owner(lock); in rt_mutex_adjust_prio_chain()
661 if (waiter == rt_mutex_top_waiter(lock)) { in rt_mutex_adjust_prio_chain()
684 waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
709 top_waiter = rt_mutex_top_waiter(lock); in rt_mutex_adjust_prio_chain()
713 raw_spin_unlock(&lock->wait_lock); in rt_mutex_adjust_prio_chain()
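
rt_mutex_adjust_prio_chain() is the heart of priority inheritance: starting from a waiter it follows lock -> owner -> lock-the-owner-blocks-on, requeueing the waiter (lines 628-630) and boosting or deboosting each owner along the way, with deadlock detection when the walk comes back around to orig_lock (line 564). Stripped of all the trylock/retry and requeue machinery, the propagation loop looks roughly like this (hypothetical single-threaded sketch):

    #include <stddef.h>

    struct lock_s;

    struct task_s {
            int prio;                       /* lower value = higher priority */
            struct lock_s *blocked_on;      /* lock this task waits for, or NULL */
    };

    struct lock_s {
            struct task_s *owner;
    };

    /* Propagate a waiter's priority up the blocking chain: the core idea
     * of rt_mutex_adjust_prio_chain(), minus all locking, requeueing and
     * the deadlock/chain-depth checks. */
    static void boost_chain(struct task_s *waiter)
    {
            struct task_s *t = waiter;

            while (t->blocked_on && t->blocked_on->owner) {
                    struct task_s *owner = t->blocked_on->owner;

                    if (owner->prio <= t->prio)     /* already high enough: stop */
                            break;
                    owner->prio = t->prio;          /* inherit waiter's priority */
                    t = owner;                      /* continue with the next owner */
            }
    }

    int main(void)
    {
            struct task_s low = { .prio = 90 }, high = { .prio = 10 };
            struct lock_s l = { .owner = &low };

            high.blocked_on = &l;
            boost_chain(&high);
            return low.prio == 10 ? 0 : 1;  /* low now runs at high's priority */
    }
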
753 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, in try_to_take_rt_mutex() argument
775 mark_rt_mutex_waiters(lock); in try_to_take_rt_mutex()
780 if (rt_mutex_owner(lock)) in try_to_take_rt_mutex()
793 if (waiter != rt_mutex_top_waiter(lock)) in try_to_take_rt_mutex()
800 rt_mutex_dequeue(lock, waiter); in try_to_take_rt_mutex()
811 if (rt_mutex_has_waiters(lock)) { in try_to_take_rt_mutex()
817 if (task->prio >= rt_mutex_top_waiter(lock)->prio) in try_to_take_rt_mutex()
849 if (rt_mutex_has_waiters(lock)) in try_to_take_rt_mutex()
850 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); in try_to_take_rt_mutex()
855 debug_rt_mutex_lock(lock); in try_to_take_rt_mutex()
861 rt_mutex_set_owner(lock, task); in try_to_take_rt_mutex()
863 rt_mutex_deadlock_account_lock(lock, task); in try_to_take_rt_mutex()
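
try_to_take_rt_mutex() is the one place a task can become owner: it first sets the waiters bit (line 775) so concurrent fast-path unlocks fail and fall into the slow path, then takes the lock only if there is no owner and the task either is the queued top waiter (line 793) or outranks the current top waiter (line 817). The decision, reduced to a predicate (hypothetical helper, same lower-is-higher prio convention as above):

    #include <stdbool.h>
    #include <stddef.h>

    struct waiter { int prio; };

    /* May `me` take an unowned lock?  `top` is the current top waiter
     * (NULL if none); `queued` says whether `me` is already enqueued. */
    static bool can_take(const struct waiter *me, const struct waiter *top,
                         bool queued)
    {
            if (queued)
                    return me == top;   /* only the top waiter may take it */
            if (top && me->prio >= top->prio)
                    return false;       /* a queued waiter outranks us */
            return true;
    }

    int main(void)
    {
            struct waiter me = { .prio = 5 }, top = { .prio = 10 };

            return can_take(&me, &top, false) ? 0 : 1; /* 5 outranks 10 */
    }
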
875 static int task_blocks_on_rt_mutex(struct rt_mutex *lock, in task_blocks_on_rt_mutex() argument
880 struct task_struct *owner = rt_mutex_owner(lock); in task_blocks_on_rt_mutex()
901 waiter->lock = lock; in task_blocks_on_rt_mutex()
905 if (rt_mutex_has_waiters(lock)) in task_blocks_on_rt_mutex()
906 top_waiter = rt_mutex_top_waiter(lock); in task_blocks_on_rt_mutex()
907 rt_mutex_enqueue(lock, waiter); in task_blocks_on_rt_mutex()
917 if (waiter == rt_mutex_top_waiter(lock)) { in task_blocks_on_rt_mutex()
947 raw_spin_unlock(&lock->wait_lock); in task_blocks_on_rt_mutex()
949 res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, in task_blocks_on_rt_mutex()
952 raw_spin_lock(&lock->wait_lock); in task_blocks_on_rt_mutex()
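
task_blocks_on_rt_mutex() enqueues the waiter and, if it became the new top waiter (line 917), requeues it in the owner's pi_waiters and triggers a chain walk. Note the lock dance at lines 947-952: wait_lock must be dropped before rt_mutex_adjust_prio_chain() runs, because the walk takes other locks, and is retaken afterwards, so the caller must revalidate state. The same drop/retake shape in a pthread sketch (hypothetical, shape only):

    #include <pthread.h>

    static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

    static void chain_walk(void) { /* stand-in for the PI chain walk */ }

    /* Called with wait_lock held; returns with it held again, but the
     * protected state may have changed while the walk ran unlocked. */
    static void walk_unlocked(void)
    {
            pthread_mutex_unlock(&wait_lock);   /* cf. line 947 */
            chain_walk();                       /* cf. line 949 */
            pthread_mutex_lock(&wait_lock);     /* cf. line 952 */
    }

    int main(void)
    {
            pthread_mutex_lock(&wait_lock);
            walk_unlocked();
            pthread_mutex_unlock(&wait_lock);
            return 0;
    }
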
965 static void wakeup_next_waiter(struct rt_mutex *lock) in wakeup_next_waiter() argument
972 waiter = rt_mutex_top_waiter(lock); in wakeup_next_waiter()
990 lock->owner = (void *) RT_MUTEX_HAS_WAITERS; in wakeup_next_waiter()
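
Line 990 is subtle: before waking the top waiter, the owner word is set to RT_MUTEX_HAS_WAITERS with a NULL task pointer, meaning "nobody owns this, but waiters exist". That non-NULL word keeps the fast-path cmpxchg in rt_mutex_fastlock() (line 1330) from stealing the lock out from under the woken waiter. Decoding that word with the earlier tagging sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define HAS_WAITERS 1UL

    int main(void)
    {
            /* The state set at line 990: NULL owner, waiters bit set. */
            uintptr_t word = (uintptr_t)NULL | HAS_WAITERS;

            printf("owner=%p waiters=%lu\n",
                   (void *)(word & ~HAS_WAITERS),
                   (unsigned long)(word & HAS_WAITERS));
            return 0;
    }
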
1008 static void remove_waiter(struct rt_mutex *lock, in remove_waiter() argument
1011 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); in remove_waiter()
1012 struct task_struct *owner = rt_mutex_owner(lock); in remove_waiter()
1017 rt_mutex_dequeue(lock, waiter); in remove_waiter()
1032 if (rt_mutex_has_waiters(lock)) in remove_waiter()
1033 rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); in remove_waiter()
1052 raw_spin_unlock(&lock->wait_lock); in remove_waiter()
1054 rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, in remove_waiter()
1057 raw_spin_lock(&lock->wait_lock); in remove_waiter()
1079 next_lock = waiter->lock; in rt_mutex_adjust_pi()
1100 __rt_mutex_slowlock(struct rt_mutex *lock, int state, in __rt_mutex_slowlock() argument
1108 if (try_to_take_rt_mutex(lock, current, waiter)) in __rt_mutex_slowlock()
1125 raw_spin_unlock(&lock->wait_lock); in __rt_mutex_slowlock()
1129 schedule_rt_mutex(lock); in __rt_mutex_slowlock()
1131 raw_spin_lock(&lock->wait_lock); in __rt_mutex_slowlock()
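
__rt_mutex_slowlock() is the wait loop: try to take the lock (line 1108), otherwise drop wait_lock, schedule away (line 1129), retake wait_lock (line 1131) and retry, until it succeeds, times out, or a signal arrives. The shape of that loop as a condition-variable sketch (hypothetical stand-ins; the kernel schedules directly rather than using pthreads):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  woken = PTHREAD_COND_INITIALIZER;

    static bool try_take(void) { return true; }  /* stub for try_to_take_rt_mutex() */

    static void slowlock_shape(void)
    {
            pthread_mutex_lock(&wait_lock);
            while (!try_take())                         /* cf. line 1108 */
                    pthread_cond_wait(&woken, &wait_lock); /* drop, sleep, retake */
            pthread_mutex_unlock(&wait_lock);
    }

    int main(void)
    {
            slowlock_shape();
            return 0;
    }
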
1163 rt_mutex_slowlock(struct rt_mutex *lock, int state, in rt_mutex_slowlock() argument
1174 raw_spin_lock(&lock->wait_lock); in rt_mutex_slowlock()
1177 if (try_to_take_rt_mutex(lock, current, NULL)) { in rt_mutex_slowlock()
1178 raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowlock()
1191 ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); in rt_mutex_slowlock()
1195 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); in rt_mutex_slowlock()
1199 if (rt_mutex_has_waiters(lock)) in rt_mutex_slowlock()
1200 remove_waiter(lock, &waiter); in rt_mutex_slowlock()
1208 fixup_rt_mutex_waiters(lock); in rt_mutex_slowlock()
1210 raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowlock()
1224 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) in rt_mutex_slowtrylock() argument
1233 if (rt_mutex_owner(lock)) in rt_mutex_slowtrylock()
1240 raw_spin_lock(&lock->wait_lock); in rt_mutex_slowtrylock()
1242 ret = try_to_take_rt_mutex(lock, current, NULL); in rt_mutex_slowtrylock()
1248 fixup_rt_mutex_waiters(lock); in rt_mutex_slowtrylock()
1250 raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowtrylock()
1259 rt_mutex_slowunlock(struct rt_mutex *lock) in rt_mutex_slowunlock() argument
1261 raw_spin_lock(&lock->wait_lock); in rt_mutex_slowunlock()
1263 debug_rt_mutex_unlock(lock); in rt_mutex_slowunlock()
1298 while (!rt_mutex_has_waiters(lock)) { in rt_mutex_slowunlock()
1300 if (unlock_rt_mutex_safe(lock) == true) in rt_mutex_slowunlock()
1303 raw_spin_lock(&lock->wait_lock); in rt_mutex_slowunlock()
1310 wakeup_next_waiter(lock); in rt_mutex_slowunlock()
1312 raw_spin_unlock(&lock->wait_lock); in rt_mutex_slowunlock()
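
rt_mutex_slowunlock() pairs with unlock_rt_mutex_safe(): while the lock looks waiter-free (line 1298) it attempts the race-free release (line 1300); if that cmpxchg fails, a waiter arrived meanwhile, so it retakes wait_lock and rechecks, finally waking the top waiter when one exists (line 1310). The retry loop, sketched with C11 atomics (hypothetical; the actual wakeup is omitted):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define HAS_WAITERS 1UL

    static _Atomic uintptr_t owner_word;

    static bool release_or_retry(uintptr_t me)
    {
            for (;;) {
                    uintptr_t cur = atomic_load(&owner_word);

                    if (cur & HAS_WAITERS)
                            return false;  /* waiters queued: caller wakes one */

                    /* No waiters visible: try the race-free release.  Failure
                     * means a waiter slipped in; loop and look again. */
                    uintptr_t expected = me;
                    if (atomic_compare_exchange_strong(&owner_word, &expected, 0))
                            return true;
            }
    }

    int main(void)
    {
            atomic_store(&owner_word, 0x1000);  /* pretend we own it */
            return release_or_retry(0x1000) ? 0 : 1;
    }
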
1325 rt_mutex_fastlock(struct rt_mutex *lock, int state, in rt_mutex_fastlock() argument
1326 int (*slowfn)(struct rt_mutex *lock, int state, in rt_mutex_fastlock() argument
1330 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { in rt_mutex_fastlock()
1331 rt_mutex_deadlock_account_lock(lock, current); in rt_mutex_fastlock()
1334 return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); in rt_mutex_fastlock()
1338 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, in rt_mutex_timed_fastlock() argument
1341 int (*slowfn)(struct rt_mutex *lock, int state, in rt_mutex_timed_fastlock() argument
1346 likely(rt_mutex_cmpxchg(lock, NULL, current))) { in rt_mutex_timed_fastlock()
1347 rt_mutex_deadlock_account_lock(lock, current); in rt_mutex_timed_fastlock()
1350 return slowfn(lock, state, timeout, chwalk); in rt_mutex_timed_fastlock()
1354 rt_mutex_fasttrylock(struct rt_mutex *lock, in rt_mutex_fasttrylock() argument
1355 int (*slowfn)(struct rt_mutex *lock)) in rt_mutex_fasttrylock() argument
1357 if (likely(rt_mutex_cmpxchg(lock, NULL, current))) { in rt_mutex_fasttrylock()
1358 rt_mutex_deadlock_account_lock(lock, current); in rt_mutex_fasttrylock()
1361 return slowfn(lock); in rt_mutex_fasttrylock()
1365 rt_mutex_fastunlock(struct rt_mutex *lock, in rt_mutex_fastunlock() argument
1366 void (*slowfn)(struct rt_mutex *lock)) in rt_mutex_fastunlock() argument
1368 if (likely(rt_mutex_cmpxchg(lock, current, NULL))) in rt_mutex_fastunlock()
1371 slowfn(lock); in rt_mutex_fastunlock()
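
All four fast helpers follow one pattern: a single cmpxchg on the owner word (NULL -> current to lock, current -> NULL to unlock), falling back to the slow function whenever the word holds anything else, which includes any value with the waiters bit set. A compact sketch of the pattern (hypothetical; uintptr_t stands in for the task pointer):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uintptr_t owner_word;

    static void lock_fast(uintptr_t me, void (*slowfn)(uintptr_t))
    {
            uintptr_t expected = 0;

            if (!atomic_compare_exchange_strong(&owner_word, &expected, me))
                    slowfn(me);   /* contended: slow path queues us */
    }

    static void unlock_fast(uintptr_t me, void (*slowfn)(uintptr_t))
    {
            uintptr_t expected = me;

            if (!atomic_compare_exchange_strong(&owner_word, &expected, 0))
                    slowfn(me);   /* waiters bit set: must wake someone */
    }

    static void noop(uintptr_t me) { (void)me; }

    int main(void)
    {
            lock_fast(0x1000, noop);
            unlock_fast(0x1000, noop);
            return 0;
    }
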
1379 void __sched rt_mutex_lock(struct rt_mutex *lock) in rt_mutex_lock() argument
1383 rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); in rt_mutex_lock()
1396 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) in rt_mutex_lock_interruptible() argument
1400 return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); in rt_mutex_lock_interruptible()
1407 int rt_mutex_timed_futex_lock(struct rt_mutex *lock, in rt_mutex_timed_futex_lock() argument
1412 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, in rt_mutex_timed_futex_lock()
1431 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) in rt_mutex_timed_lock() argument
1435 return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, in rt_mutex_timed_lock()
1448 int __sched rt_mutex_trylock(struct rt_mutex *lock) in rt_mutex_trylock() argument
1450 return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); in rt_mutex_trylock()
1459 void __sched rt_mutex_unlock(struct rt_mutex *lock) in rt_mutex_unlock() argument
1461 rt_mutex_fastunlock(lock, rt_mutex_slowunlock); in rt_mutex_unlock()
1473 void rt_mutex_destroy(struct rt_mutex *lock) in rt_mutex_destroy() argument
1475 WARN_ON(rt_mutex_is_locked(lock)); in rt_mutex_destroy()
1477 lock->magic = NULL; in rt_mutex_destroy()
1492 void __rt_mutex_init(struct rt_mutex *lock, const char *name) in __rt_mutex_init() argument
1494 lock->owner = NULL; in __rt_mutex_init()
1495 raw_spin_lock_init(&lock->wait_lock); in __rt_mutex_init()
1496 lock->waiters = RB_ROOT; in __rt_mutex_init()
1497 lock->waiters_leftmost = NULL; in __rt_mutex_init()
1499 debug_rt_mutex_init(lock, name); in __rt_mutex_init()
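
Putting the public entry points together, typical in-kernel usage looks like the sketch below (built only from the functions listed above; it needs a kernel tree to compile, not userspace, and real code would usually prefer the rt_mutex_init() wrapper):

    #include <linux/rtmutex.h>

    static struct rt_mutex demo_lock;

    static void demo(void)
    {
            __rt_mutex_init(&demo_lock, "demo_lock");

            rt_mutex_lock(&demo_lock);
            /* critical section: owner may be priority-boosted by waiters */
            rt_mutex_unlock(&demo_lock);

            if (rt_mutex_trylock(&demo_lock)) {
                    /* took it without blocking */
                    rt_mutex_unlock(&demo_lock);
            }

            rt_mutex_destroy(&demo_lock);
    }
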
1513 void rt_mutex_init_proxy_locked(struct rt_mutex *lock, in rt_mutex_init_proxy_locked() argument
1516 __rt_mutex_init(lock, NULL); in rt_mutex_init_proxy_locked()
1517 debug_rt_mutex_proxy_lock(lock, proxy_owner); in rt_mutex_init_proxy_locked()
1518 rt_mutex_set_owner(lock, proxy_owner); in rt_mutex_init_proxy_locked()
1519 rt_mutex_deadlock_account_lock(lock, proxy_owner); in rt_mutex_init_proxy_locked()
1530 void rt_mutex_proxy_unlock(struct rt_mutex *lock, in rt_mutex_proxy_unlock() argument
1533 debug_rt_mutex_proxy_unlock(lock); in rt_mutex_proxy_unlock()
1534 rt_mutex_set_owner(lock, NULL); in rt_mutex_proxy_unlock()
1551 int rt_mutex_start_proxy_lock(struct rt_mutex *lock, in rt_mutex_start_proxy_lock() argument
1557 raw_spin_lock(&lock->wait_lock); in rt_mutex_start_proxy_lock()
1559 if (try_to_take_rt_mutex(lock, task, NULL)) { in rt_mutex_start_proxy_lock()
1560 raw_spin_unlock(&lock->wait_lock); in rt_mutex_start_proxy_lock()
1565 ret = task_blocks_on_rt_mutex(lock, waiter, task, in rt_mutex_start_proxy_lock()
1568 if (ret && !rt_mutex_owner(lock)) { in rt_mutex_start_proxy_lock()
1579 remove_waiter(lock, waiter); in rt_mutex_start_proxy_lock()
1581 raw_spin_unlock(&lock->wait_lock); in rt_mutex_start_proxy_lock()
1600 struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) in rt_mutex_next_owner() argument
1602 if (!rt_mutex_has_waiters(lock)) in rt_mutex_next_owner()
1605 return rt_mutex_top_waiter(lock)->task; in rt_mutex_next_owner()
1623 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, in rt_mutex_finish_proxy_lock() argument
1629 raw_spin_lock(&lock->wait_lock); in rt_mutex_finish_proxy_lock()
1634 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); in rt_mutex_finish_proxy_lock()
1637 remove_waiter(lock, waiter); in rt_mutex_finish_proxy_lock()
1643 fixup_rt_mutex_waiters(lock); in rt_mutex_finish_proxy_lock()
1645 raw_spin_unlock(&lock->wait_lock); in rt_mutex_finish_proxy_lock()
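
The proxy functions exist for the futex requeue-PI path: rt_mutex_start_proxy_lock() enqueues a waiter on behalf of another task without that task running, rt_mutex_finish_proxy_lock() completes or abandons the acquisition once the task wakes in its own context, and rt_mutex_next_owner() lets the futex code see who will get the lock. A sketch of the two-phase contract as it appears from the caller's side (argument lists assumed from this kernel version; the real caller is kernel/futex.c):

    #include <linux/rtmutex.h>

    /* Phase 1 runs in the waker's context, phase 2 in the woken task's
     * context; `waiter` must stay alive in between (the futex code keeps
     * it on the woken task's stack). */
    static int proxy_phase1(struct rt_mutex *lock, struct rt_mutex_waiter *waiter,
                            struct task_struct *task)
    {
            /* Queue `task` as a waiter without it running; returns <0 on
             * deadlock, 1 if the lock was acquired for it outright. */
            return rt_mutex_start_proxy_lock(lock, waiter, task);
    }

    static int proxy_phase2(struct rt_mutex *lock, struct rt_mutex_waiter *waiter,
                            struct hrtimer_sleeper *to)
    {
            /* Runs after wakeup: take the lock or clean up on
             * timeout/signal (cf. remove_waiter at line 1637). */
            return rt_mutex_finish_proxy_lock(lock, to, waiter);
    }
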