Lines matching refs: task
173 return dl_time_before(left->task->dl.deadline, in rt_mutex_waiter_less()
174 right->task->dl.deadline); in rt_mutex_waiter_less()
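The two hits above (173-174) are inside rt_mutex_waiter_less(): when both tasks run under a deadline policy, their waiters are ordered by the earlier absolute deadline via dl_time_before(). A minimal, self-contained sketch of that ordering rule follows; the struct and field names are simplified stand-ins for illustration, not the kernel's types.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's waiter/task pairing; the
     * field names are illustrative, not the kernel's. */
    struct demo_waiter {
        int      prio;      /* lower value = higher priority      */
        bool     is_dl;     /* task runs under a deadline policy  */
        uint64_t deadline;  /* absolute deadline (only if is_dl)  */
    };

    /* True if 'left' should be queued before 'right': primary key is
     * prio; deadline tasks tie-break on the earlier deadline, which is
     * what the dl_time_before() call at lines 173-174 decides. */
    static bool waiter_less(const struct demo_waiter *left,
                            const struct demo_waiter *right)
    {
        if (left->prio < right->prio)
            return true;
        if (left->prio > right->prio)
            return false;
        if (left->is_dl && right->is_dl)
            return (int64_t)(left->deadline - right->deadline) < 0;
        return false;
    }

    int main(void)
    {
        struct demo_waiter a = { .prio = -1, .is_dl = true, .deadline = 1000 };
        struct demo_waiter b = { .prio = -1, .is_dl = true, .deadline = 2000 };

        printf("a before b: %d\n", waiter_less(&a, &b));  /* prints 1 */
        return 0;
    }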
219 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue_pi() argument
221 struct rb_node **link = &task->pi_waiters.rb_node; in rt_mutex_enqueue_pi()
238 task->pi_waiters_leftmost = &waiter->pi_tree_entry; in rt_mutex_enqueue_pi()
241 rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters); in rt_mutex_enqueue_pi()
245 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue_pi() argument
250 if (task->pi_waiters_leftmost == &waiter->pi_tree_entry) in rt_mutex_dequeue_pi()
251 task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry); in rt_mutex_dequeue_pi()
253 rb_erase(&waiter->pi_tree_entry, &task->pi_waiters); in rt_mutex_dequeue_pi()
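Lines 219-253 cover rt_mutex_enqueue_pi() and rt_mutex_dequeue_pi(), which keep a task's PI waiters in a priority-sorted rbtree (task->pi_waiters) plus a cached leftmost pointer (task->pi_waiters_leftmost) so the top waiter is an O(1) lookup; note how lines 250-251 advance the cache with rb_next() before erasing. The toy program below models only that cached-leftmost bookkeeping with a plain, unbalanced binary search tree and a remove-minimum operation; it is an illustrative sketch, not the kernel's rbtree code.

    #include <stdio.h>

    /* Toy unbalanced BST standing in for the kernel rbtree; it only
     * models the "cached leftmost" bookkeeping, not red-black
     * rebalancing.  All names here are illustrative. */
    struct node {
        int prio;                     /* key: lower value = higher priority */
        struct node *left, *right, *parent;
    };

    struct pi_tree {
        struct node *root;
        struct node *leftmost;        /* cached highest-priority waiter */
    };

    static void enqueue(struct pi_tree *t, struct node *n)
    {
        struct node **link = &t->root, *parent = NULL;
        int leftmost = 1;

        /* Walk down to a free slot, like the **link walk at line 221;
         * remember whether we ever went right. */
        while (*link) {
            parent = *link;
            if (n->prio < parent->prio) {
                link = &parent->left;
            } else {
                link = &parent->right;
                leftmost = 0;
            }
        }
        n->parent = parent;
        n->left = n->right = NULL;
        *link = n;
        if (leftmost)
            t->leftmost = n;          /* cf. the pi_waiters_leftmost update */
    }

    /* Remove the cached minimum, first advancing the cache to its
     * in-order successor (the kernel uses rb_next() for this). */
    static struct node *dequeue_leftmost(struct pi_tree *t)
    {
        struct node *n = t->leftmost;

        if (!n)
            return NULL;

        if (n->right) {
            struct node *s = n->right;
            while (s->left)
                s = s->left;
            t->leftmost = s;
        } else {
            t->leftmost = n->parent;
        }

        /* The minimum never has a left child, so splice in its
         * right subtree. */
        if (n->parent)
            n->parent->left = n->right;
        else
            t->root = n->right;
        if (n->right)
            n->right->parent = n->parent;

        return n;
    }

    int main(void)
    {
        struct pi_tree t = { 0 };
        struct node a = { .prio = 3 }, b = { .prio = 1 }, c = { .prio = 2 };

        enqueue(&t, &a);
        enqueue(&t, &b);
        enqueue(&t, &c);

        while (t.leftmost)
            printf("top waiter prio %d\n", dequeue_leftmost(&t)->prio);
        return 0;
    }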
263 int rt_mutex_getprio(struct task_struct *task) in rt_mutex_getprio() argument
265 if (likely(!task_has_pi_waiters(task))) in rt_mutex_getprio()
266 return task->normal_prio; in rt_mutex_getprio()
268 return min(task_top_pi_waiter(task)->prio, in rt_mutex_getprio()
269 task->normal_prio); in rt_mutex_getprio()
272 struct task_struct *rt_mutex_get_top_task(struct task_struct *task) in rt_mutex_get_top_task() argument
274 if (likely(!task_has_pi_waiters(task))) in rt_mutex_get_top_task()
277 return task_top_pi_waiter(task)->task; in rt_mutex_get_top_task()
284 int rt_mutex_get_effective_prio(struct task_struct *task, int newprio) in rt_mutex_get_effective_prio() argument
286 if (!task_has_pi_waiters(task)) in rt_mutex_get_effective_prio()
289 if (task_top_pi_waiter(task)->task->prio <= newprio) in rt_mutex_get_effective_prio()
290 return task_top_pi_waiter(task)->task->prio; in rt_mutex_get_effective_prio()
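Lines 263-290 are the priority queries: rt_mutex_getprio() returns the task's normal priority unless a queued PI waiter outranks it (smaller value wins, hence the min()), rt_mutex_get_top_task() hands back that top waiter's task, and rt_mutex_get_effective_prio() clamps a proposed new priority the same way. A compact userspace sketch of the min/clamp logic, with invented helper names and a -1 sentinel standing in for "no PI waiters":

    #include <stdio.h>

    #define NO_WAITER (-1)   /* sentinel: task has no PI waiters */

    /* Smaller number = higher priority, as in the kernel.  Both
     * helpers are illustrative stand-ins, not kernel APIs. */
    static int effective_prio(int normal_prio, int top_waiter_prio)
    {
        if (top_waiter_prio == NO_WAITER)
            return normal_prio;                 /* cf. line 266        */
        return top_waiter_prio < normal_prio ?  /* cf. the min() at 268 */
               top_waiter_prio : normal_prio;
    }

    static int effective_new_prio(int newprio, int top_waiter_prio)
    {
        if (top_waiter_prio == NO_WAITER)
            return newprio;
        /* A requested deboost cannot drop below the boost the top
         * waiter still demands (cf. lines 289-290). */
        return top_waiter_prio <= newprio ? top_waiter_prio : newprio;
    }

    int main(void)
    {
        printf("%d\n", effective_prio(120, 95));      /* boosted to 95 */
        printf("%d\n", effective_new_prio(110, 95));  /* clamped to 95 */
        return 0;
    }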
299 static void __rt_mutex_adjust_prio(struct task_struct *task) in __rt_mutex_adjust_prio() argument
301 int prio = rt_mutex_getprio(task); in __rt_mutex_adjust_prio()
303 if (task->prio != prio || dl_prio(prio)) in __rt_mutex_adjust_prio()
304 rt_mutex_setprio(task, prio); in __rt_mutex_adjust_prio()
316 void rt_mutex_adjust_prio(struct task_struct *task) in rt_mutex_adjust_prio() argument
320 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_prio()
321 __rt_mutex_adjust_prio(task); in rt_mutex_adjust_prio()
322 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio()
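Lines 299-322 are the boost/deboost entry points: __rt_mutex_adjust_prio() recomputes the effective priority and calls rt_mutex_setprio() only when it changed (deadline-priority results are always re-applied, since a bare prio value cannot capture a deadline change), and rt_mutex_adjust_prio() wraps that in task->pi_lock with interrupts disabled. Below is a hedged userspace analogue of the lock/recompute/apply-on-change pattern, using a pthread mutex in place of pi_lock and made-up demo_* helpers:

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-in for the task; not the kernel's task_struct. */
    struct demo_task {
        pthread_mutex_t pi_lock;   /* plays the role of task->pi_lock */
        int prio;                  /* current effective priority      */
        int normal_prio;           /* priority without boosting       */
        int top_waiter_prio;       /* -1 if no PI waiters             */
    };

    static int demo_getprio(struct demo_task *t)
    {
        if (t->top_waiter_prio < 0)
            return t->normal_prio;
        return t->top_waiter_prio < t->normal_prio ?
               t->top_waiter_prio : t->normal_prio;
    }

    static void demo_setprio(struct demo_task *t, int prio)
    {
        printf("setprio %d -> %d\n", t->prio, prio);
        t->prio = prio;
    }

    /* Recompute and apply only on change, mirroring lines 301-304;
     * the caller must hold pi_lock, as rt_mutex_adjust_prio() ensures. */
    static void demo_adjust_prio_locked(struct demo_task *t)
    {
        int prio = demo_getprio(t);

        if (t->prio != prio)
            demo_setprio(t, prio);
    }

    static void demo_adjust_prio(struct demo_task *t)
    {
        pthread_mutex_lock(&t->pi_lock);     /* cf. line 320 */
        demo_adjust_prio_locked(t);
        pthread_mutex_unlock(&t->pi_lock);   /* cf. line 322 */
    }

    int main(void)
    {
        struct demo_task t = { .prio = 120, .normal_prio = 120,
                               .top_waiter_prio = 90 };

        pthread_mutex_init(&t.pi_lock, NULL);
        demo_adjust_prio(&t);   /* boosts: prints "setprio 120 -> 90" */
        demo_adjust_prio(&t);   /* no further change, prints nothing  */
        pthread_mutex_destroy(&t.pi_lock);
        return 0;
    }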
424 static int rt_mutex_adjust_prio_chain(struct task_struct *task, in rt_mutex_adjust_prio_chain() argument
464 put_task_struct(task); in rt_mutex_adjust_prio_chain()
479 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
484 waiter = task->pi_blocked_on; in rt_mutex_adjust_prio_chain()
523 if (!task_has_pi_waiters(task)) in rt_mutex_adjust_prio_chain()
531 if (top_waiter != task_top_pi_waiter(task)) { in rt_mutex_adjust_prio_chain()
546 if (waiter->prio == task->prio) { in rt_mutex_adjust_prio_chain()
563 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
594 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
595 put_task_struct(task); in rt_mutex_adjust_prio_chain()
607 task = rt_mutex_owner(lock); in rt_mutex_adjust_prio_chain()
608 get_task_struct(task); in rt_mutex_adjust_prio_chain()
609 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
617 next_lock = task_blocked_on_lock(task); in rt_mutex_adjust_prio_chain()
624 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
642 waiter->prio = task->prio; in rt_mutex_adjust_prio_chain()
646 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
647 put_task_struct(task); in rt_mutex_adjust_prio_chain()
663 wake_up_process(rt_mutex_top_waiter(lock)->task); in rt_mutex_adjust_prio_chain()
669 task = rt_mutex_owner(lock); in rt_mutex_adjust_prio_chain()
670 get_task_struct(task); in rt_mutex_adjust_prio_chain()
671 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
681 rt_mutex_dequeue_pi(task, prerequeue_top_waiter); in rt_mutex_adjust_prio_chain()
682 rt_mutex_enqueue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
683 __rt_mutex_adjust_prio(task); in rt_mutex_adjust_prio_chain()
696 rt_mutex_dequeue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
698 rt_mutex_enqueue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
699 __rt_mutex_adjust_prio(task); in rt_mutex_adjust_prio_chain()
717 next_lock = task_blocked_on_lock(task); in rt_mutex_adjust_prio_chain()
725 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
749 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
751 put_task_struct(task); in rt_mutex_adjust_prio_chain()
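The long run of references from line 424 to 751 is rt_mutex_adjust_prio_chain(), the walk up the lock chain: at each step it takes task->pi_lock, re-sorts the waiter that task contributes to the lock it blocks on, boosts or deboosts the lock owner, then advances to whatever that owner is blocked on (lines 607/669), pinning each task with get_task_struct()/put_task_struct() across the step. The sketch below models only the propagation loop on a simplified chain (task blocked on lock, lock owned by task, ...); locking, reference counting, requeueing details, deboosting and the deadlock/loop detection are deliberately left out, and every name is invented for illustration:

    #include <stddef.h>
    #include <stdio.h>

    struct demo_lock;

    struct demo_task {
        const char *name;
        int prio;                       /* smaller = higher priority */
        struct demo_lock *blocked_on;   /* lock this task waits for  */
    };

    struct demo_lock {
        const char *name;
        struct demo_task *owner;
        int top_waiter_prio;            /* best prio among waiters   */
    };

    /* Walk the chain starting from 'task', propagating its (possibly
     * just boosted) priority to each owner in turn, the way the real
     * chain walk re-queues waiters and adjusts every owner it visits. */
    static void demo_adjust_prio_chain(struct demo_task *task)
    {
        for (;;) {
            struct demo_lock *lock = task->blocked_on;
            struct demo_task *owner;

            if (!lock)
                return;                 /* end of chain: task can run */

            /* Re-sort: this task may now be the lock's top waiter. */
            if (task->prio < lock->top_waiter_prio)
                lock->top_waiter_prio = task->prio;

            owner = lock->owner;
            if (!owner)
                return;

            /* Boost the owner if the top waiter outranks it, then
             * continue from the owner (cf. task = rt_mutex_owner(lock)
             * at lines 607/669). */
            if (lock->top_waiter_prio < owner->prio) {
                printf("boost %s: %d -> %d\n",
                       owner->name, owner->prio, lock->top_waiter_prio);
                owner->prio = lock->top_waiter_prio;
                task = owner;
                continue;
            }
            return;                     /* nothing changed: stop walking */
        }
    }

    int main(void)
    {
        struct demo_task c = { "C", 120, NULL };
        struct demo_lock l2 = { "L2", &c, 120 };
        struct demo_task b = { "B", 120, &l2 };
        struct demo_lock l1 = { "L1", &b, 120 };
        struct demo_task a = { "A", 90, &l1 };  /* high-prio waiter */

        demo_adjust_prio_chain(&a);
        /* prints: boost B: 120 -> 90, then boost C: 120 -> 90 */
        return 0;
    }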
766 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, in try_to_take_rt_mutex() argument
830 if (task->prio >= rt_mutex_top_waiter(lock)->prio) in try_to_take_rt_mutex()
855 raw_spin_lock_irqsave(&task->pi_lock, flags); in try_to_take_rt_mutex()
856 task->pi_blocked_on = NULL; in try_to_take_rt_mutex()
863 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); in try_to_take_rt_mutex()
864 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in try_to_take_rt_mutex()
874 rt_mutex_set_owner(lock, task); in try_to_take_rt_mutex()
876 rt_mutex_deadlock_account_lock(lock, task); in try_to_take_rt_mutex()
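Lines 766-876 are try_to_take_rt_mutex(): with other waiters queued, a task may grab the lock only if it is strictly higher priority than the top waiter (the >= test at line 830 means "not better, so back off"); on success it clears its own pi_blocked_on (856), adopts the remaining top waiter into its pi_waiters tree (863) and becomes the owner (874). A small sketch of just the eligibility check and the ownership hand-off, on simplified types:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct demo_task {
        const char *name;
        int prio;                      /* smaller = higher priority */
    };

    struct demo_lock {
        struct demo_task *owner;
        struct demo_task *top_waiter;  /* NULL if nobody is queued  */
    };

    /* Illustrative take-or-back-off decision, cf. line 830: with a
     * waiter queue present, only a strictly higher-priority task may
     * take the lock ahead of the top waiter. */
    static bool demo_try_to_take(struct demo_lock *lock,
                                 struct demo_task *task)
    {
        if (lock->owner)
            return false;              /* already owned              */

        if (lock->top_waiter && task != lock->top_waiter &&
            task->prio >= lock->top_waiter->prio)
            return false;              /* would jump a better waiter */

        lock->owner = task;            /* cf. rt_mutex_set_owner()   */
        return true;
    }

    int main(void)
    {
        struct demo_task waiter = { "waiter", 90 };
        struct demo_task thief  = { "thief", 100 };
        struct demo_lock lock   = { NULL, &waiter };

        printf("thief takes it:  %d\n", demo_try_to_take(&lock, &thief));
        printf("waiter takes it: %d\n", demo_try_to_take(&lock, &waiter));
        return 0;
    }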
890 struct task_struct *task, in task_blocks_on_rt_mutex() argument
908 if (owner == task) in task_blocks_on_rt_mutex()
911 raw_spin_lock_irqsave(&task->pi_lock, flags); in task_blocks_on_rt_mutex()
912 __rt_mutex_adjust_prio(task); in task_blocks_on_rt_mutex()
913 waiter->task = task; in task_blocks_on_rt_mutex()
915 waiter->prio = task->prio; in task_blocks_on_rt_mutex()
922 task->pi_blocked_on = waiter; in task_blocks_on_rt_mutex()
924 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in task_blocks_on_rt_mutex()
963 next_lock, waiter, task); in task_blocks_on_rt_mutex()
1006 wake_q_add(wake_q, waiter->task); in mark_wakeup_next_waiter()
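Lines 890-963 are task_blocks_on_rt_mutex(), the enqueue side: owner == task at line 908 is the self-deadlock check, then under task->pi_lock the waiter is filled in (waiter->task, waiter->prio) and task->pi_blocked_on is set before the owner is, if needed, boosted via a chain walk; line 1006 is the release side queuing the top waiter's task for wakeup. A hedged sketch of that bookkeeping order on simplified structures; the waiter queue is a single slot and no real locking or chain walk is attempted:

    #include <stddef.h>
    #include <stdio.h>

    struct demo_task;

    struct demo_waiter {
        struct demo_task *task;             /* who is waiting             */
        int prio;                           /* priority snapshot          */
    };

    struct demo_task {
        const char *name;
        int prio;
        struct demo_waiter *pi_blocked_on;  /* cf. task->pi_blocked_on    */
    };

    struct demo_lock {
        struct demo_task *owner;
        struct demo_waiter *top_waiter;     /* single slot, not a tree    */
    };

    /* Enqueue 'task' as a waiter: refuse the self-deadlock case first
     * (cf. owner == task at line 908), then record the waiter fields
     * and the back pointer the chain walk later follows. */
    static int demo_block_on(struct demo_lock *lock,
                             struct demo_waiter *waiter,
                             struct demo_task *task)
    {
        if (lock->owner == task)
            return -1;                      /* -EDEADLK in the real code  */

        waiter->task = task;                /* cf. lines 913-915          */
        waiter->prio = task->prio;
        task->pi_blocked_on = waiter;       /* cf. line 922               */

        if (!lock->top_waiter || waiter->prio < lock->top_waiter->prio)
            lock->top_waiter = waiter;
        return 0;
    }

    /* Release side: the top waiter's task is the one to wake next
     * (cf. wake_q_add() at line 1006). */
    static struct demo_task *demo_next_to_wake(struct demo_lock *lock)
    {
        return lock->top_waiter ? lock->top_waiter->task : NULL;
    }

    int main(void)
    {
        struct demo_task owner = { "owner", 120, NULL };
        struct demo_task t     = { "waiter", 90, NULL };
        struct demo_waiter w;
        struct demo_lock lock  = { &owner, NULL };

        if (demo_block_on(&lock, &w, &t) == 0)
            printf("wake next: %s\n", demo_next_to_wake(&lock)->name);
        return 0;
    }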
1072 void rt_mutex_adjust_pi(struct task_struct *task) in rt_mutex_adjust_pi() argument
1078 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_pi()
1080 waiter = task->pi_blocked_on; in rt_mutex_adjust_pi()
1081 if (!waiter || (waiter->prio == task->prio && in rt_mutex_adjust_pi()
1082 !dl_prio(task->prio))) { in rt_mutex_adjust_pi()
1083 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_pi()
1087 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_pi()
1090 get_task_struct(task); in rt_mutex_adjust_pi()
1092 rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, in rt_mutex_adjust_pi()
1093 next_lock, NULL, task); in rt_mutex_adjust_pi()
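rt_mutex_adjust_pi() (lines 1072-1093) runs after a scheduler-driven priority change: under task->pi_lock it bails out early when the task is not blocked at all, or when the waiter it queued already carries the right priority (deadline tasks are never skipped, because a prio value alone cannot reflect a deadline change); otherwise it pins the task and restarts a minimal chain walk. A compact sketch of just that early-exit decision, with invented types:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct demo_waiter {
        int prio;                        /* prio recorded at enqueue time */
    };

    struct demo_task {
        int prio;
        bool is_dl;                      /* deadline-scheduled?           */
        struct demo_waiter *pi_blocked_on;
    };

    /* Decide whether a priority change must be propagated down the
     * lock chain (cf. the check at lines 1080-1082). */
    static bool demo_needs_chain_walk(const struct demo_task *task)
    {
        const struct demo_waiter *waiter = task->pi_blocked_on;

        if (!waiter)
            return false;                /* not blocked: nothing to fix  */
        if (waiter->prio == task->prio && !task->is_dl)
            return false;                /* queue entry is already right */
        return true;
    }

    int main(void)
    {
        struct demo_waiter w = { .prio = 90 };
        struct demo_task t = { .prio = 80, .is_dl = false, .pi_blocked_on = &w };

        printf("walk chain: %d\n", demo_needs_chain_walk(&t)); /* prints 1 */
        return 0;
    }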
1126 if (timeout && !timeout->task) in __rt_mutex_slowlock()
1595 struct task_struct *task) in rt_mutex_start_proxy_lock() argument
1601 if (try_to_take_rt_mutex(lock, task, NULL)) { in rt_mutex_start_proxy_lock()
1607 ret = task_blocks_on_rt_mutex(lock, waiter, task, in rt_mutex_start_proxy_lock()
1647 return rt_mutex_top_waiter(lock)->task; in rt_mutex_next_owner()
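The remaining hits are the slow path and the proxy API: __rt_mutex_slowlock() (line 1126) treats a cleared timeout->task as the sign that the hrtimer sleeper already fired, rt_mutex_start_proxy_lock() (1595-1607) tries to take the lock on behalf of another task and otherwise enqueues it as a waiter, and rt_mutex_next_owner() (1647) reports the top waiter's task as the lock's next owner. The sketch below strings those ideas together on a deliberately simplified, self-contained model; all demo_* names and the timed_out flag are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_task {
        const char *name;
        int prio;
    };

    struct demo_lock {
        struct demo_task *owner;
        struct demo_task *top_waiter;    /* single slot stands in for the tree */
    };

    /* Acquire the lock on behalf of 'task', or queue it as a waiter;
     * loosely follows the shape of rt_mutex_start_proxy_lock(). */
    static int demo_start_proxy_lock(struct demo_lock *lock,
                                     struct demo_task *task)
    {
        if (!lock->owner) {              /* cf. try_to_take_rt_mutex()    */
            lock->owner = task;
            return 1;                    /* got it right away             */
        }
        if (!lock->top_waiter || task->prio < lock->top_waiter->prio)
            lock->top_waiter = task;     /* cf. task_blocks_on_rt_mutex() */
        return 0;                        /* caller must wait              */
    }

    /* cf. rt_mutex_next_owner(): whoever is queued first will own the
     * lock next once the current owner releases it. */
    static struct demo_task *demo_next_owner(struct demo_lock *lock)
    {
        return lock->top_waiter;
    }

    /* cf. the timeout->task check at line 1126: a wait given an
     * already-expired timeout fails immediately. */
    static int demo_wait(struct demo_lock *lock, struct demo_task *task,
                         bool timed_out)
    {
        if (timed_out)
            return -1;                   /* -ETIMEDOUT in the real code   */
        return demo_start_proxy_lock(lock, task);
    }

    int main(void)
    {
        struct demo_task holder = { "holder", 100 };
        struct demo_task client = { "client", 90 };
        struct demo_lock lock = { &holder, NULL };

        demo_wait(&lock, &client, false);
        printf("next owner: %s\n", demo_next_owner(&lock)->name);
        return 0;
    }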