Lines matching references to task

161 		return (left->task->dl.deadline < right->task->dl.deadline);  in rt_mutex_waiter_less()
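The comparison at line 161 is the deadline tie-break inside the waiter ordering used by rt_mutex_waiter_less(). Below is a minimal userspace model of that ordering; struct waiter and is_dl() are simplified stand-ins, not the kernel's rt_mutex_waiter and dl_prio():

#include <stdbool.h>

/*
 * Simplified stand-ins for the kernel types: lower prio value means higher
 * priority, and (as in the kernel's dl_prio()) a negative value marks a
 * deadline-scheduled task.  This is a model, not the kernel code.
 */
struct waiter {
	int prio;                    /* effective priority of the blocked task */
	unsigned long long deadline; /* absolute deadline, meaningful for DL tasks only */
};

/* Model of dl_prio(): deadline tasks sit below 0 on the priority scale. */
static bool is_dl(int prio)
{
	return prio < 0;
}

/*
 * True when 'left' should be ordered before 'right' in the waiter tree:
 * strictly better priority wins, and two deadline waiters tie-break on the
 * earlier absolute deadline, which is the comparison shown at line 161.
 */
static bool waiter_less(const struct waiter *left, const struct waiter *right)
{
	if (left->prio < right->prio)
		return true;
	if (is_dl(left->prio) && left->prio == right->prio)
		return left->deadline < right->deadline;
	return false;
}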
206 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue_pi() argument
208 struct rb_node **link = &task->pi_waiters.rb_node; in rt_mutex_enqueue_pi()
225 task->pi_waiters_leftmost = &waiter->pi_tree_entry; in rt_mutex_enqueue_pi()
228 rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters); in rt_mutex_enqueue_pi()
232 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) in rt_mutex_dequeue_pi() argument
237 if (task->pi_waiters_leftmost == &waiter->pi_tree_entry) in rt_mutex_dequeue_pi()
238 task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry); in rt_mutex_dequeue_pi()
240 rb_erase(&waiter->pi_tree_entry, &task->pi_waiters); in rt_mutex_dequeue_pi()
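Lines 206-240 keep each task's tree of PI waiters together with a cached pointer to its leftmost (highest-priority) entry. The following sketch shows the same leftmost-caching pattern using a sorted doubly linked list instead of the kernel rbtree; struct pi_node and struct pi_task are invented names for illustration only:

#include <stddef.h>

struct pi_node {
	int prio;                  /* lower value = higher priority */
	struct pi_node *prev, *next;
};

struct pi_task {
	struct pi_node *waiters;   /* waiters sorted by priority */
	struct pi_node *leftmost;  /* cached highest-priority waiter */
};

/*
 * Insert in priority order and refresh the cached leftmost pointer, the way
 * rt_mutex_enqueue_pi() updates task->pi_waiters_leftmost (line 225).
 */
static void pi_enqueue(struct pi_task *t, struct pi_node *w)
{
	struct pi_node *cur = t->waiters, *prev = NULL;

	while (cur && cur->prio <= w->prio) {
		prev = cur;
		cur = cur->next;
	}
	w->prev = prev;
	w->next = cur;
	if (cur)
		cur->prev = w;
	if (prev)
		prev->next = w;
	else
		t->waiters = w;
	if (!prev)                 /* inserted at the front: new leftmost */
		t->leftmost = w;
}

/*
 * Remove a waiter; when it was the cached leftmost, advance the cache to its
 * successor, just as rt_mutex_dequeue_pi() does with rb_next() (line 238).
 */
static void pi_dequeue(struct pi_task *t, struct pi_node *w)
{
	if (t->leftmost == w)
		t->leftmost = w->next;
	if (w->prev)
		w->prev->next = w->next;
	else
		t->waiters = w->next;
	if (w->next)
		w->next->prev = w->prev;
	w->prev = w->next = NULL;
}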
250 int rt_mutex_getprio(struct task_struct *task) in rt_mutex_getprio() argument
252 if (likely(!task_has_pi_waiters(task))) in rt_mutex_getprio()
253 return task->normal_prio; in rt_mutex_getprio()
255 return min(task_top_pi_waiter(task)->prio, in rt_mutex_getprio()
256 task->normal_prio); in rt_mutex_getprio()
259 struct task_struct *rt_mutex_get_top_task(struct task_struct *task) in rt_mutex_get_top_task() argument
261 if (likely(!task_has_pi_waiters(task))) in rt_mutex_get_top_task()
264 return task_top_pi_waiter(task)->task; in rt_mutex_get_top_task()
271 int rt_mutex_get_effective_prio(struct task_struct *task, int newprio) in rt_mutex_get_effective_prio() argument
273 if (!task_has_pi_waiters(task)) in rt_mutex_get_effective_prio()
276 if (task_top_pi_waiter(task)->task->prio <= newprio) in rt_mutex_get_effective_prio()
277 return task_top_pi_waiter(task)->task->prio; in rt_mutex_get_effective_prio()
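Lines 250-277 are the two priority helpers: rt_mutex_getprio() yields the boosted priority, and rt_mutex_get_effective_prio() clamps a requested new priority against the top PI waiter. Hedged models of both calculations, with plain ints standing in for the kernel's priority values (lower means higher priority):

/* rt_mutex_getprio() model: with no PI waiters the task keeps its normal
 * priority, otherwise it runs at the better of its own priority and the
 * top waiter's priority (the min() at lines 255-256). */
static int model_getprio(int normal_prio, int has_pi_waiters, int top_waiter_prio)
{
	if (!has_pi_waiters)
		return normal_prio;
	return top_waiter_prio < normal_prio ? top_waiter_prio : normal_prio;
}

/* rt_mutex_get_effective_prio() model: when the normal priority is about to
 * be set to newprio, the result may not be worse than the top PI waiter's
 * priority (the check at lines 276-277). */
static int model_get_effective_prio(int newprio, int has_pi_waiters, int top_waiter_prio)
{
	if (!has_pi_waiters)
		return newprio;
	if (top_waiter_prio <= newprio)
		return top_waiter_prio;
	return newprio;
}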
286 static void __rt_mutex_adjust_prio(struct task_struct *task) in __rt_mutex_adjust_prio() argument
288 int prio = rt_mutex_getprio(task); in __rt_mutex_adjust_prio()
290 if (task->prio != prio || dl_prio(prio)) in __rt_mutex_adjust_prio()
291 rt_mutex_setprio(task, prio); in __rt_mutex_adjust_prio()
303 static void rt_mutex_adjust_prio(struct task_struct *task) in rt_mutex_adjust_prio() argument
307 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_prio()
308 __rt_mutex_adjust_prio(task); in rt_mutex_adjust_prio()
309 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio()
411 static int rt_mutex_adjust_prio_chain(struct task_struct *task, in rt_mutex_adjust_prio_chain() argument
451 put_task_struct(task); in rt_mutex_adjust_prio_chain()
466 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
471 waiter = task->pi_blocked_on; in rt_mutex_adjust_prio_chain()
510 if (!task_has_pi_waiters(task)) in rt_mutex_adjust_prio_chain()
518 if (top_waiter != task_top_pi_waiter(task)) { in rt_mutex_adjust_prio_chain()
533 if (waiter->prio == task->prio) { in rt_mutex_adjust_prio_chain()
550 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
581 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
582 put_task_struct(task); in rt_mutex_adjust_prio_chain()
594 task = rt_mutex_owner(lock); in rt_mutex_adjust_prio_chain()
595 get_task_struct(task); in rt_mutex_adjust_prio_chain()
596 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
604 next_lock = task_blocked_on_lock(task); in rt_mutex_adjust_prio_chain()
611 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
629 waiter->prio = task->prio; in rt_mutex_adjust_prio_chain()
633 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
634 put_task_struct(task); in rt_mutex_adjust_prio_chain()
650 wake_up_process(rt_mutex_top_waiter(lock)->task); in rt_mutex_adjust_prio_chain()
656 task = rt_mutex_owner(lock); in rt_mutex_adjust_prio_chain()
657 get_task_struct(task); in rt_mutex_adjust_prio_chain()
658 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
668 rt_mutex_dequeue_pi(task, prerequeue_top_waiter); in rt_mutex_adjust_prio_chain()
669 rt_mutex_enqueue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
670 __rt_mutex_adjust_prio(task); in rt_mutex_adjust_prio_chain()
683 rt_mutex_dequeue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
685 rt_mutex_enqueue_pi(task, waiter); in rt_mutex_adjust_prio_chain()
686 __rt_mutex_adjust_prio(task); in rt_mutex_adjust_prio_chain()
704 next_lock = task_blocked_on_lock(task); in rt_mutex_adjust_prio_chain()
712 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
736 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_prio_chain()
738 put_task_struct(task); in rt_mutex_adjust_prio_chain()
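rt_mutex_adjust_prio_chain() (lines 411-738) walks from a blocked task to the owner of the lock it waits on, requeues the waiter, boosts that owner, and then repeats with whatever the owner is itself blocked on, until nothing changes or the chain ends. The following is a heavily simplified, single-threaded model of that propagation loop; the struct names are invented, and the kernel's pi_lock/wait_lock juggling, get/put_task_struct refcounting and deadlock detection are all omitted:

#include <stdio.h>
#include <stddef.h>

struct model_lock;

struct model_task {
	int prio;                      /* lower value = higher priority */
	struct model_lock *blocked_on; /* lock this task waits for, or NULL */
};

struct model_lock {
	struct model_task *owner;
	int top_waiter_prio;           /* best priority among this lock's waiters */
};

/*
 * Propagate priority up the blocking chain: as long as the walking task
 * outranks a lock's current top waiter, requeue it as top waiter, boost the
 * owner and continue with whatever that owner is itself blocked on.  This
 * mirrors the requeue + __rt_mutex_adjust_prio() steps of the chain walk.
 */
static void model_adjust_prio_chain(struct model_task *task)
{
	int prio = task->prio;
	struct model_lock *lock = task->blocked_on;

	while (lock) {
		if (prio >= lock->top_waiter_prio)
			break;                        /* nothing changed; the walk stops */
		lock->top_waiter_prio = prio;         /* this task is now the top waiter */

		struct model_task *owner = lock->owner;
		if (!owner)
			break;
		if (prio < owner->prio)
			owner->prio = prio;           /* boost the owner */

		task = owner;                         /* continue with the owner */
		prio = task->prio;
		lock = task->blocked_on;
	}
}

int main(void)
{
	struct model_task a = { 10, NULL };
	struct model_lock l1 = { &a, 50 };
	struct model_task b = { 5, &l1 };    /* B waits on l1, owned by A */
	struct model_lock l0 = { &b, 50 };
	struct model_task c = { 1, &l0 };    /* C waits on l0, owned by B */

	model_adjust_prio_chain(&c);
	printf("B boosted to %d, A boosted to %d\n", b.prio, a.prio);
	return 0;
}

Run, the example boosts both intermediate owner B and final owner A to priority 1, which is the effect the real chain walk has on an owner chain.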
753 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, in try_to_take_rt_mutex() argument
817 if (task->prio >= rt_mutex_top_waiter(lock)->prio) in try_to_take_rt_mutex()
842 raw_spin_lock_irqsave(&task->pi_lock, flags); in try_to_take_rt_mutex()
843 task->pi_blocked_on = NULL; in try_to_take_rt_mutex()
850 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); in try_to_take_rt_mutex()
851 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in try_to_take_rt_mutex()
861 rt_mutex_set_owner(lock, task); in try_to_take_rt_mutex()
863 rt_mutex_deadlock_account_lock(lock, task); in try_to_take_rt_mutex()
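try_to_take_rt_mutex() (lines 753-863) only lets a task take a free but contended lock if it outranks the current top waiter (the rejection at line 817); on success it becomes owner and takes over the remaining top waiter into its own PI tree (line 850). A rough model of that admission rule, leaving out the kernel's "has waiters" bit and the optional waiter argument:

#include <stdbool.h>

struct model_task { int prio; };   /* lower value = higher priority */

struct model_lock {
	struct model_task *owner;
	struct model_task *top_waiter; /* highest-priority blocked task, or NULL */
};

/*
 * Model of the core admission rule: an owned lock can never be taken, and a
 * free lock that still has waiters can only be taken by a task with strictly
 * better priority than the top waiter (the task->prio >= ... check at 817).
 */
static bool model_try_to_take(struct model_lock *lock, struct model_task *task)
{
	if (lock->owner)
		return false;
	if (lock->top_waiter && task->prio >= lock->top_waiter->prio)
		return false;
	lock->owner = task;   /* the kernel also moves the top waiter into task's PI tree */
	return true;
}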
877 struct task_struct *task, in task_blocks_on_rt_mutex() argument
895 if (owner == task) in task_blocks_on_rt_mutex()
898 raw_spin_lock_irqsave(&task->pi_lock, flags); in task_blocks_on_rt_mutex()
899 __rt_mutex_adjust_prio(task); in task_blocks_on_rt_mutex()
900 waiter->task = task; in task_blocks_on_rt_mutex()
902 waiter->prio = task->prio; in task_blocks_on_rt_mutex()
909 task->pi_blocked_on = waiter; in task_blocks_on_rt_mutex()
911 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in task_blocks_on_rt_mutex()
950 next_lock, waiter, task); in task_blocks_on_rt_mutex()
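task_blocks_on_rt_mutex() (lines 877-950) records the blockage: it rejects the trivial self-deadlock (line 895), fills the waiter with the task and its priority (lines 900-902), publishes it via task->pi_blocked_on (line 909), and boosts the owner when the new waiter becomes the lock's top waiter. A compact model of that bookkeeping, with all locking and the follow-up chain walk left out; the types and field names are assumptions:

#include <errno.h>

struct model_lock;
struct model_waiter;

struct model_task {
	int prio;                          /* lower value = higher priority */
	struct model_waiter *pi_blocked_on;
};

struct model_waiter {
	struct model_task *task;
	struct model_lock *lock;
	int prio;
};

struct model_lock {
	struct model_task *owner;
	struct model_waiter *top_waiter;
};

/*
 * Model of the bookkeeping: refuse the trivial self-deadlock, wire the waiter
 * to both the task and the lock, and boost the owner when the new waiter
 * becomes the top waiter.  The kernel additionally takes pi_lock/wait_lock
 * and may start a chain walk; all of that is omitted here.
 */
static int model_block_on(struct model_lock *lock, struct model_waiter *waiter,
			  struct model_task *task)
{
	if (lock->owner == task)
		return -EDEADLK;               /* the owner == task check at line 895 */

	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;             /* waiter->prio = task->prio, line 902 */
	task->pi_blocked_on = waiter;          /* task->pi_blocked_on = waiter, line 909 */

	if (!lock->top_waiter || waiter->prio < lock->top_waiter->prio) {
		lock->top_waiter = waiter;
		if (lock->owner && waiter->prio < lock->owner->prio)
			lock->owner->prio = waiter->prio;  /* boost; a chain walk would follow */
	}
	return 0;
}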
999 wake_up_process(waiter->task); in wakeup_next_waiter()
1065 void rt_mutex_adjust_pi(struct task_struct *task) in rt_mutex_adjust_pi() argument
1071 raw_spin_lock_irqsave(&task->pi_lock, flags); in rt_mutex_adjust_pi()
1073 waiter = task->pi_blocked_on; in rt_mutex_adjust_pi()
1074 if (!waiter || (waiter->prio == task->prio && in rt_mutex_adjust_pi()
1075 !dl_prio(task->prio))) { in rt_mutex_adjust_pi()
1076 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_pi()
1080 raw_spin_unlock_irqrestore(&task->pi_lock, flags); in rt_mutex_adjust_pi()
1083 get_task_struct(task); in rt_mutex_adjust_pi()
1085 rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, in rt_mutex_adjust_pi()
1086 next_lock, NULL, task); in rt_mutex_adjust_pi()
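rt_mutex_adjust_pi() (lines 1065-1086) runs after a task's priority has been changed from the outside; it only starts a chain walk when the task is actually blocked on an rt-mutex and the priority recorded in its waiter is stale (deadline tasks always rewalk). A small model of that early-out decision:

#include <stdbool.h>

/*
 * Model of the early-out at lines 1074-1076: no chain walk is needed unless
 * the task is blocked and its waiter's recorded priority no longer matches;
 * the dl flag models the !dl_prio(task->prio) exception.
 */
static bool model_needs_chain_walk(bool blocked_on_rtmutex, int waiter_prio,
				   int task_prio, bool task_is_dl)
{
	if (!blocked_on_rtmutex)
		return false;
	return waiter_prio != task_prio || task_is_dl;
}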
1119 if (timeout && !timeout->task) in __rt_mutex_slowlock()
1188 timeout->task = NULL; in rt_mutex_slowlock()
1553 struct task_struct *task) in rt_mutex_start_proxy_lock() argument
1559 if (try_to_take_rt_mutex(lock, task, NULL)) { in rt_mutex_start_proxy_lock()
1565 ret = task_blocks_on_rt_mutex(lock, waiter, task, in rt_mutex_start_proxy_lock()
1605 return rt_mutex_top_waiter(lock)->task; in rt_mutex_next_owner()