Searched refs:waiter (Results 1 - 68 of 68) sorted by relevance

/linux-4.1.27/drivers/gpu/host1x/
intr.c:44 * add a waiter to a waiter queue, sorted by threshold
47 static bool add_waiter_to_queue(struct host1x_waitlist *waiter, add_waiter_to_queue() argument
51 u32 thresh = waiter->thresh; add_waiter_to_queue()
55 list_add(&waiter->list, &pos->list); list_for_each_entry_reverse()
59 list_add(&waiter->list, queue);
64 * run through a waiter queue for a single sync point ID
71 struct host1x_waitlist *waiter, *next, *prev; remove_completed_waiters() local
73 list_for_each_entry_safe(waiter, next, head, list) { list_for_each_entry_safe()
74 if ((s32)(waiter->thresh - sync) > 0) list_for_each_entry_safe()
77 dest = completed + waiter->action; list_for_each_entry_safe()
80 if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE && list_for_each_entry_safe()
84 if (prev->data == waiter->data) { list_for_each_entry_safe()
91 if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) { list_for_each_entry_safe()
92 list_del(&waiter->list); list_for_each_entry_safe()
93 kref_put(&waiter->refcount, waiter_release); list_for_each_entry_safe()
95 list_move_tail(&waiter->list, dest); list_for_each_entry_safe()
110 static void action_submit_complete(struct host1x_waitlist *waiter) action_submit_complete() argument
112 struct host1x_channel *channel = waiter->data; action_submit_complete()
118 waiter->count, waiter->thresh); action_submit_complete()
122 static void action_wakeup(struct host1x_waitlist *waiter) action_wakeup() argument
124 wait_queue_head_t *wq = waiter->data; action_wakeup()
128 static void action_wakeup_interruptible(struct host1x_waitlist *waiter) action_wakeup_interruptible() argument
130 wait_queue_head_t *wq = waiter->data; action_wakeup_interruptible()
134 typedef void (*action_handler)(struct host1x_waitlist *waiter);
149 struct host1x_waitlist *waiter, *next; run_handlers() local
151 list_for_each_entry_safe(waiter, next, head, list) { list_for_each_entry_safe()
152 list_del(&waiter->list); list_for_each_entry_safe()
153 handler(waiter); list_for_each_entry_safe()
154 WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != list_for_each_entry_safe()
156 kref_put(&waiter->refcount, waiter_release); list_for_each_entry_safe()
214 struct host1x_waitlist *waiter, void **ref) host1x_intr_add_action()
219 if (waiter == NULL) { host1x_intr_add_action()
220 pr_warn("%s: NULL waiter\n", __func__); host1x_intr_add_action()
224 /* initialize a new waiter */ host1x_intr_add_action()
225 INIT_LIST_HEAD(&waiter->list); host1x_intr_add_action()
226 kref_init(&waiter->refcount); host1x_intr_add_action()
228 kref_get(&waiter->refcount); host1x_intr_add_action()
229 waiter->thresh = thresh; host1x_intr_add_action()
230 waiter->action = action; host1x_intr_add_action()
231 atomic_set(&waiter->state, WLS_PENDING); host1x_intr_add_action()
232 waiter->data = data; host1x_intr_add_action()
233 waiter->count = 1; host1x_intr_add_action()
241 if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) { host1x_intr_add_action()
245 /* added as first waiter - enable interrupt */ host1x_intr_add_action()
253 *ref = waiter; host1x_intr_add_action()
259 struct host1x_waitlist *waiter = ref; host1x_intr_put_ref() local
262 while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) == host1x_intr_put_ref()
270 kref_put(&waiter->refcount, waiter_release); host1x_intr_put_ref()
331 struct host1x_waitlist *waiter, *next; host1x_intr_stop() local
333 list_for_each_entry_safe(waiter, next, host1x_intr_stop()
335 if (atomic_cmpxchg(&waiter->state, host1x_intr_stop()
337 list_del(&waiter->list); host1x_intr_stop()
338 kref_put(&waiter->refcount, waiter_release); host1x_intr_stop()
212 host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, enum host1x_intr_action action, void *data, struct host1x_waitlist *waiter, void **ref) host1x_intr_add_action() argument
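
The intr.c hits above all revolve around one idiom: waiters hang on a per-syncpoint list kept sorted by threshold, and expiry is tested with a wraparound-safe signed comparison, (s32)(waiter->thresh - sync) > 0. A minimal sketch of that sorted insert, with hypothetical my_* names standing in for the host1x types:

#include <linux/list.h>
#include <linux/types.h>

struct my_waiter {
    struct list_head list;
    u32 thresh;     /* syncpoint value at which this waiter fires */
};

/* Insert sorted by threshold; returns true if it became the head waiter. */
static bool my_add_waiter(struct my_waiter *w, struct list_head *queue)
{
    struct my_waiter *pos;

    /* Walk backwards so equal thresholds keep FIFO order. */
    list_for_each_entry_reverse(pos, queue, list) {
        if ((s32)(pos->thresh - w->thresh) <= 0) {
            list_add(&w->list, &pos->list);
            return false;
        }
    }
    list_add(&w->list, queue);
    return true;    /* first waiter: caller enables the syncpt IRQ */
}

The signed subtraction is what lets a free-running 32-bit syncpoint counter wrap without stranding waiters.
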
intr.h:73 * @waiter waiter structure - assumes ownership
80 struct host1x_waitlist *waiter, void **ref);
syncpt.c:193 struct host1x_waitlist *waiter; host1x_syncpt_wait() local
220 /* allocate a waiter */ host1x_syncpt_wait()
221 waiter = kzalloc(sizeof(*waiter), GFP_KERNEL); host1x_syncpt_wait()
222 if (!waiter) { host1x_syncpt_wait()
230 &wq, waiter, &ref); host1x_syncpt_wait()
/linux-4.1.27/kernel/locking/
mutex-debug.c:30 void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) debug_mutex_lock_common() argument
32 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter)); debug_mutex_lock_common()
33 waiter->magic = waiter; debug_mutex_lock_common()
34 INIT_LIST_HEAD(&waiter->list); debug_mutex_lock_common()
37 void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) debug_mutex_wake_waiter() argument
41 DEBUG_LOCKS_WARN_ON(waiter->magic != waiter); debug_mutex_wake_waiter()
42 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); debug_mutex_wake_waiter()
45 void debug_mutex_free_waiter(struct mutex_waiter *waiter) debug_mutex_free_waiter() argument
47 DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list)); debug_mutex_free_waiter()
48 memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter)); debug_mutex_free_waiter()
51 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, debug_mutex_add_waiter() argument
57 ti->task->blocked_on = waiter; debug_mutex_add_waiter()
60 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, mutex_remove_waiter() argument
63 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); mutex_remove_waiter()
64 DEBUG_LOCKS_WARN_ON(waiter->task != ti->task); mutex_remove_waiter()
65 DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter); mutex_remove_waiter()
68 list_del_init(&waiter->list); mutex_remove_waiter()
69 waiter->task = NULL; mutex_remove_waiter()
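
The mutex-debug.c helpers above lean on waiter poisoning: the struct is memset() to a known byte pattern and waiter->magic is set to the waiter's own address, so later checks can cheaply detect a stale, freed, or overwritten waiter. A hedged sketch of that self-pointer trick; the my_* names and poison values are invented here:

#include <linux/list.h>
#include <linux/string.h>
#include <linux/bug.h>

#define MY_WAITER_INIT 0x11     /* hypothetical poison patterns */
#define MY_WAITER_FREE 0x22

struct my_mutex_waiter {
    struct list_head list;
    void *magic;
};

static void my_waiter_init(struct my_mutex_waiter *w)
{
    memset(w, MY_WAITER_INIT, sizeof(*w));
    w->magic = w;                   /* self-pointer marks it live */
    INIT_LIST_HEAD(&w->list);
}

static void my_waiter_check(struct my_mutex_waiter *w)
{
    WARN_ON(w->magic != w);         /* trips on reuse or corruption */
}

static void my_waiter_free(struct my_mutex_waiter *w)
{
    WARN_ON(!list_empty(&w->list)); /* must already be off the wait list */
    memset(w, MY_WAITER_FREE, sizeof(*w));
}
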
mutex.h:16 #define mutex_remove_waiter(lock, waiter, ti) \
17 __list_del((waiter)->list.prev, (waiter)->list.next)
39 #define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
40 #define debug_mutex_free_waiter(waiter) do { } while (0)
41 #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
46 debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) debug_mutex_lock_common() argument
rtmutex-debug.h:15 extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
16 extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
24 struct rt_mutex_waiter *waiter,
26 extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
30 static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, debug_rt_mutex_detect_deadlock() argument
33 return (waiter != NULL); debug_rt_mutex_detect_deadlock()
rtmutex-debug.c:65 * We fill out the fields in the waiter to store the information about
67 * case of a remove waiter operation.
85 void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) debug_rt_mutex_print_deadlock() argument
89 if (!waiter->deadlock_lock || !debug_locks) debug_rt_mutex_print_deadlock()
93 task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID); debug_rt_mutex_print_deadlock()
114 printk_lock(waiter->lock, 1); debug_rt_mutex_print_deadlock()
118 printk_lock(waiter->deadlock_lock, 1); debug_rt_mutex_print_deadlock()
155 void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) debug_rt_mutex_init_waiter() argument
157 memset(waiter, 0x11, sizeof(*waiter)); debug_rt_mutex_init_waiter()
158 waiter->deadlock_task_pid = NULL; debug_rt_mutex_init_waiter()
161 void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) debug_rt_mutex_free_waiter() argument
163 put_pid(waiter->deadlock_task_pid); debug_rt_mutex_free_waiter()
164 memset(waiter, 0x22, sizeof(*waiter)); debug_rt_mutex_free_waiter()
rtmutex.c:30 * NULL 1 lock is free and has waiters and the top waiter
101 * If a new waiter comes in between the unlock and the cmpxchg
119 * wake waiter();
157 * If left waiter has a dl_prio(), and we didn't return 1 above, rt_mutex_waiter_less()
158 * then right waiter has a dl_prio() too. rt_mutex_waiter_less()
167 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) rt_mutex_enqueue() argument
177 if (rt_mutex_waiter_less(waiter, entry)) { rt_mutex_enqueue()
186 lock->waiters_leftmost = &waiter->tree_entry; rt_mutex_enqueue()
188 rb_link_node(&waiter->tree_entry, parent, link); rt_mutex_enqueue()
189 rb_insert_color(&waiter->tree_entry, &lock->waiters); rt_mutex_enqueue()
193 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) rt_mutex_dequeue() argument
195 if (RB_EMPTY_NODE(&waiter->tree_entry)) rt_mutex_dequeue()
198 if (lock->waiters_leftmost == &waiter->tree_entry) rt_mutex_dequeue()
199 lock->waiters_leftmost = rb_next(&waiter->tree_entry); rt_mutex_dequeue()
201 rb_erase(&waiter->tree_entry, &lock->waiters); rt_mutex_dequeue()
202 RB_CLEAR_NODE(&waiter->tree_entry); rt_mutex_dequeue()
206 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) rt_mutex_enqueue_pi() argument
216 if (rt_mutex_waiter_less(waiter, entry)) { rt_mutex_enqueue_pi()
225 task->pi_waiters_leftmost = &waiter->pi_tree_entry; rt_mutex_enqueue_pi()
227 rb_link_node(&waiter->pi_tree_entry, parent, link); rt_mutex_enqueue_pi()
228 rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters); rt_mutex_enqueue_pi()
232 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) rt_mutex_dequeue_pi() argument
234 if (RB_EMPTY_NODE(&waiter->pi_tree_entry)) rt_mutex_dequeue_pi()
237 if (task->pi_waiters_leftmost == &waiter->pi_tree_entry) rt_mutex_dequeue_pi()
238 task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry); rt_mutex_dequeue_pi()
240 rb_erase(&waiter->pi_tree_entry, &task->pi_waiters); rt_mutex_dequeue_pi()
241 RB_CLEAR_NODE(&waiter->pi_tree_entry); rt_mutex_dequeue_pi()
245 * Calculate task priority from the waiter tree priority
247 * Return task->normal_prio when the waiter tree is empty or when
248 * the waiter is not allowed to do priority boosting
321 * If the waiter argument is NULL this indicates the deboost path and
325 static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, rt_mutex_cond_detect_deadlock() argument
335 return debug_rt_mutex_detect_deadlock(waiter, chwalk); rt_mutex_cond_detect_deadlock()
363 * depicted above or if the top waiter has gone away and we are rt_mutex_adjust_prio_chain()
365 * @top_task: the current top waiter
390 * [2] waiter = task->pi_blocked_on; [P]
392 * [4] lock = waiter->lock; [P]
398 * [7] requeue_lock_waiter(lock, waiter); [P] + [L]
418 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; rt_mutex_adjust_prio_chain() local
469 * [2] Get the waiter on which @task is blocked on. rt_mutex_adjust_prio_chain()
471 waiter = task->pi_blocked_on; rt_mutex_adjust_prio_chain()
482 if (!waiter) rt_mutex_adjust_prio_chain()
501 if (next_lock != waiter->lock) rt_mutex_adjust_prio_chain()
514 * are not the top pi waiter of the task. If deadlock rt_mutex_adjust_prio_chain()
527 * If the waiter priority is the same as the task priority rt_mutex_adjust_prio_chain()
533 if (waiter->prio == task->prio) { rt_mutex_adjust_prio_chain()
543 lock = waiter->lock; rt_mutex_adjust_prio_chain()
606 * Get the top waiter for the next iteration rt_mutex_adjust_prio_chain()
621 * Store the current top waiter before doing the requeue rt_mutex_adjust_prio_chain()
627 /* [7] Requeue the waiter in the lock waiter list. */ rt_mutex_adjust_prio_chain()
628 rt_mutex_dequeue(lock, waiter); rt_mutex_adjust_prio_chain()
629 waiter->prio = task->prio; rt_mutex_adjust_prio_chain()
630 rt_mutex_enqueue(lock, waiter); rt_mutex_adjust_prio_chain()
645 * If the requeue [7] above changed the top waiter, rt_mutex_adjust_prio_chain()
646 * then we need to wake the new top waiter up to try rt_mutex_adjust_prio_chain()
661 if (waiter == rt_mutex_top_waiter(lock)) { rt_mutex_adjust_prio_chain()
663 * The waiter became the new top (highest priority) rt_mutex_adjust_prio_chain()
664 * waiter on the lock. Replace the previous top waiter rt_mutex_adjust_prio_chain()
665 * in the owner task's pi waiters list with this waiter rt_mutex_adjust_prio_chain()
669 rt_mutex_enqueue_pi(task, waiter); rt_mutex_adjust_prio_chain()
672 } else if (prerequeue_top_waiter == waiter) { rt_mutex_adjust_prio_chain()
674 * The waiter was the top waiter on the lock, but is rt_mutex_adjust_prio_chain()
675 * no longer the top priority waiter. Replace waiter in rt_mutex_adjust_prio_chain()
677 * (highest priority) waiter and adjust the priority rt_mutex_adjust_prio_chain()
679 * The new top waiter is stored in @waiter so that rt_mutex_adjust_prio_chain()
680 * @waiter == @top_waiter evaluates to true below and rt_mutex_adjust_prio_chain()
683 rt_mutex_dequeue_pi(task, waiter); rt_mutex_adjust_prio_chain()
684 waiter = rt_mutex_top_waiter(lock); rt_mutex_adjust_prio_chain()
685 rt_mutex_enqueue_pi(task, waiter); rt_mutex_adjust_prio_chain()
706 * Store the top waiter of @lock for the end of chain walk rt_mutex_adjust_prio_chain()
726 * If the current waiter is not the top waiter on the lock, rt_mutex_adjust_prio_chain()
730 if (!detect_deadlock && waiter != top_waiter) rt_mutex_adjust_prio_chain()
750 * @waiter: The waiter that is queued to the lock's wait list if the
754 struct rt_mutex_waiter *waiter) try_to_take_rt_mutex()
784 * If @waiter != NULL, @task has already enqueued the waiter try_to_take_rt_mutex()
785 * into @lock waiter list. If @waiter == NULL then this is a try_to_take_rt_mutex()
788 if (waiter) { try_to_take_rt_mutex()
790 * If waiter is not the highest priority waiter of try_to_take_rt_mutex()
793 if (waiter != rt_mutex_top_waiter(lock)) try_to_take_rt_mutex()
797 * We can acquire the lock. Remove the waiter from the try_to_take_rt_mutex()
800 rt_mutex_dequeue(lock, waiter); try_to_take_rt_mutex()
814 * the top waiter priority (kernel view), try_to_take_rt_mutex()
821 * The current top waiter stays enqueued. We try_to_take_rt_mutex()
838 * @task->pi_lock. Redundant operation for the @waiter == NULL try_to_take_rt_mutex()
847 * waiter into @task->pi_waiters list. try_to_take_rt_mutex()
871 * Prepare waiter and propagate pi chain
876 struct rt_mutex_waiter *waiter, task_blocks_on_rt_mutex()
881 struct rt_mutex_waiter *top_waiter = waiter; task_blocks_on_rt_mutex()
889 * only an optimization. We drop the locks, so another waiter task_blocks_on_rt_mutex()
892 * which is wrong, as the other waiter is not in a deadlock task_blocks_on_rt_mutex()
900 waiter->task = task; task_blocks_on_rt_mutex()
901 waiter->lock = lock; task_blocks_on_rt_mutex()
902 waiter->prio = task->prio; task_blocks_on_rt_mutex()
904 /* Get the top priority waiter on the lock */ task_blocks_on_rt_mutex()
907 rt_mutex_enqueue(lock, waiter); task_blocks_on_rt_mutex()
909 task->pi_blocked_on = waiter; task_blocks_on_rt_mutex()
917 if (waiter == rt_mutex_top_waiter(lock)) { task_blocks_on_rt_mutex()
919 rt_mutex_enqueue_pi(owner, waiter); task_blocks_on_rt_mutex()
924 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { task_blocks_on_rt_mutex()
950 next_lock, waiter, task); task_blocks_on_rt_mutex()
958 * Wake up the next waiter on the lock.
960 * Remove the top waiter from the current tasks pi waiter list and
967 struct rt_mutex_waiter *waiter; wakeup_next_waiter() local
972 waiter = rt_mutex_top_waiter(lock); wakeup_next_waiter()
980 rt_mutex_dequeue_pi(current, waiter); wakeup_next_waiter()
983 * As we are waking up the top waiter, and the waiter stays wakeup_next_waiter()
988 * the top waiter can steal this lock. wakeup_next_waiter()
995 * It's safe to dereference waiter as it cannot go away as wakeup_next_waiter()
996 * long as we hold lock->wait_lock. The waiter task needs to wakeup_next_waiter()
997 * acquire it in order to dequeue the waiter. wakeup_next_waiter()
999 wake_up_process(waiter->task); wakeup_next_waiter()
1003 * Remove a waiter from a lock and give up
1009 struct rt_mutex_waiter *waiter) remove_waiter()
1011 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); remove_waiter()
1017 rt_mutex_dequeue(lock, waiter); remove_waiter()
1022 * Only update priority if the waiter was the highest priority remove_waiter()
1023 * waiter of the lock and there is an owner to update. remove_waiter()
1030 rt_mutex_dequeue_pi(owner, waiter); remove_waiter()
1067 struct rt_mutex_waiter *waiter; rt_mutex_adjust_pi() local
1073 waiter = task->pi_blocked_on; rt_mutex_adjust_pi()
1074 if (!waiter || (waiter->prio == task->prio && rt_mutex_adjust_pi()
1079 next_lock = waiter->lock; rt_mutex_adjust_pi()
1095 * @waiter: the pre-initialized rt_mutex_waiter
1102 struct rt_mutex_waiter *waiter) __rt_mutex_slowlock()
1108 if (try_to_take_rt_mutex(lock, current, waiter)) __rt_mutex_slowlock()
1127 debug_rt_mutex_print_deadlock(waiter); __rt_mutex_slowlock()
1167 struct rt_mutex_waiter waiter; rt_mutex_slowlock() local
1170 debug_rt_mutex_init_waiter(&waiter); rt_mutex_slowlock()
1171 RB_CLEAR_NODE(&waiter.pi_tree_entry); rt_mutex_slowlock()
1172 RB_CLEAR_NODE(&waiter.tree_entry); rt_mutex_slowlock()
1191 ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); rt_mutex_slowlock()
1195 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); rt_mutex_slowlock()
1200 remove_waiter(lock, &waiter); rt_mutex_slowlock()
1201 rt_mutex_handle_deadlock(ret, chwalk, &waiter); rt_mutex_slowlock()
1205 * try_to_take_rt_mutex() sets the waiter bit rt_mutex_slowlock()
1216 debug_rt_mutex_free_waiter(&waiter); rt_mutex_slowlock()
1307 * The wakeup next waiter path does not suffer from the above rt_mutex_slowunlock()
1541 * @waiter: the pre-initialized rt_mutex_waiter
1552 struct rt_mutex_waiter *waiter, rt_mutex_start_proxy_lock()
1565 ret = task_blocks_on_rt_mutex(lock, waiter, task, rt_mutex_start_proxy_lock()
1573 * pi chain. Let the waiter sort it out. rt_mutex_start_proxy_lock()
1579 remove_waiter(lock, waiter); rt_mutex_start_proxy_lock()
1583 debug_rt_mutex_print_deadlock(waiter); rt_mutex_start_proxy_lock()
1613 * @waiter: the pre-initialized rt_mutex_waiter
1625 struct rt_mutex_waiter *waiter) rt_mutex_finish_proxy_lock()
1634 ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); rt_mutex_finish_proxy_lock()
1637 remove_waiter(lock, waiter); rt_mutex_finish_proxy_lock()
1640 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might rt_mutex_finish_proxy_lock()
753 try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, struct rt_mutex_waiter *waiter) try_to_take_rt_mutex() argument
875 task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, enum rtmutex_chainwalk chwalk) task_blocks_on_rt_mutex() argument
1008 remove_waiter(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) remove_waiter() argument
1100 __rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, struct rt_mutex_waiter *waiter) __rt_mutex_slowlock() argument
1551 rt_mutex_start_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task) rt_mutex_start_proxy_lock() argument
1623 rt_mutex_finish_proxy_lock(struct rt_mutex *lock, struct hrtimer_sleeper *to, struct rt_mutex_waiter *waiter) rt_mutex_finish_proxy_lock() argument
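
rt_mutex_enqueue() and rt_mutex_enqueue_pi() above are the same pattern instantiated twice: waiters live in a red-black tree ordered by rt_mutex_waiter_less(), with the leftmost (highest-priority) node cached so the top waiter is O(1) to find. A reduced sketch of that keyed insert, assuming a plain int prio where a lower value means higher priority; my_rt_* are illustrative names, not the kernel types:

#include <linux/rbtree.h>
#include <linux/types.h>

struct my_rt_waiter {
    struct rb_node tree_entry;
    int prio;                           /* lower value = higher priority */
};

struct my_rt_lock {
    struct rb_root waiters;
    struct rb_node *waiters_leftmost;   /* cached top waiter */
};

static void my_rt_enqueue(struct my_rt_lock *lock, struct my_rt_waiter *w)
{
    struct rb_node **link = &lock->waiters.rb_node;
    struct rb_node *parent = NULL;
    struct my_rt_waiter *entry;
    bool leftmost = true;

    while (*link) {
        parent = *link;
        entry = rb_entry(parent, struct my_rt_waiter, tree_entry);
        if (w->prio < entry->prio) {
            link = &parent->rb_left;
        } else {
            link = &parent->rb_right;
            leftmost = false;           /* someone outranks us */
        }
    }

    if (leftmost)                       /* new highest-priority waiter */
        lock->waiters_leftmost = &w->tree_entry;

    rb_link_node(&w->tree_entry, parent, link);
    rb_insert_color(&w->tree_entry, &lock->waiters);
}

Dequeue mirrors it: if the departing node is the cached leftmost, the cache is advanced with rb_next() before rb_erase(), exactly as rt_mutex_dequeue() does above.
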
rwsem-spinlock.c:67 struct rwsem_waiter *waiter; __rwsem_do_wake() local
71 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); __rwsem_do_wake()
73 if (waiter->type == RWSEM_WAITING_FOR_WRITE) { __rwsem_do_wake()
77 wake_up_process(waiter->task); __rwsem_do_wake()
84 struct list_head *next = waiter->list.next; __rwsem_do_wake()
86 list_del(&waiter->list); __rwsem_do_wake()
87 tsk = waiter->task; __rwsem_do_wake()
96 waiter->task = NULL; __rwsem_do_wake()
102 waiter = list_entry(next, struct rwsem_waiter, list); __rwsem_do_wake()
103 } while (waiter->type != RWSEM_WAITING_FOR_WRITE); __rwsem_do_wake()
117 struct rwsem_waiter *waiter; __rwsem_wake_one_writer() local
119 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); __rwsem_wake_one_writer()
120 wake_up_process(waiter->task); __rwsem_wake_one_writer()
130 struct rwsem_waiter waiter; __down_read() local
147 waiter.task = tsk; __down_read()
148 waiter.type = RWSEM_WAITING_FOR_READ; __down_read()
151 list_add_tail(&waiter.list, &sem->wait_list); __down_read()
158 if (!waiter.task) __down_read()
196 struct rwsem_waiter waiter; __down_write_nested() local
204 waiter.task = tsk; __down_write_nested()
205 waiter.type = RWSEM_WAITING_FOR_WRITE; __down_write_nested()
206 list_add_tail(&waiter.list, &sem->wait_list); __down_write_nested()
225 list_del(&waiter.list); __down_write_nested()
mutex-debug.h:17 struct mutex_waiter *waiter);
19 struct mutex_waiter *waiter);
20 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
22 struct mutex_waiter *waiter,
24 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
rwsem-xadd.c:124 struct rwsem_waiter *waiter; __rwsem_do_wake() local
129 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); __rwsem_do_wake()
130 if (waiter->type == RWSEM_WAITING_FOR_WRITE) { __rwsem_do_wake()
137 wake_up_process(waiter->task); __rwsem_do_wake()
168 if (waiter->list.next == &sem->wait_list) __rwsem_do_wake()
171 waiter = list_entry(waiter->list.next, __rwsem_do_wake()
174 } while (waiter->type != RWSEM_WAITING_FOR_WRITE); __rwsem_do_wake()
177 if (waiter->type != RWSEM_WAITING_FOR_WRITE) __rwsem_do_wake()
187 waiter = list_entry(next, struct rwsem_waiter, list); __rwsem_do_wake()
188 next = waiter->list.next; __rwsem_do_wake()
189 tsk = waiter->task; __rwsem_do_wake()
198 waiter->task = NULL; __rwsem_do_wake()
217 struct rwsem_waiter waiter; rwsem_down_read_failed() local
221 waiter.task = tsk; rwsem_down_read_failed()
222 waiter.type = RWSEM_WAITING_FOR_READ; rwsem_down_read_failed()
228 list_add_tail(&waiter.list, &sem->wait_list); rwsem_down_read_failed()
236 * wake our own waiter to join the existing active readers ! rwsem_down_read_failed()
248 if (!waiter.task) rwsem_down_read_failed()
427 struct rwsem_waiter waiter; rwsem_down_write_failed() local
440 waiter.task = current; rwsem_down_write_failed()
441 waiter.type = RWSEM_WAITING_FOR_WRITE; rwsem_down_write_failed()
449 list_add_tail(&waiter.list, &sem->wait_list); rwsem_down_write_failed()
483 list_del(&waiter.list); rwsem_down_write_failed()
491 * handle waking up a waiter on the semaphore
semaphore.c:208 struct semaphore_waiter waiter; __down_common() local
210 list_add_tail(&waiter.list, &sem->wait_list); __down_common()
211 waiter.task = task; __down_common()
212 waiter.up = false; __down_common()
223 if (waiter.up) __down_common()
228 list_del(&waiter.list); __down_common()
232 list_del(&waiter.list); __down_common()
258 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, __up() local
260 list_del(&waiter->list); __up()
261 waiter->up = true; __up()
262 wake_up_process(waiter->task); __up()
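
__down_read() in rwsem-spinlock.c and __down_common() in semaphore.c above both use the classic on-stack waiter idiom: the sleeper declares the waiter on its own stack, queues it under the lock's spinlock, then loops in a sleep state until the waker clears a field (waiter.task, or sets waiter.up) before waking it. A condensed, hedged sketch with invented my_* names:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_waiter {
    struct list_head list;
    struct task_struct *task;   /* cleared by the waker on grant */
};

struct my_sem {
    spinlock_t lock;
    struct list_head wait_list;
};

static void my_down(struct my_sem *sem)
{
    struct my_waiter waiter;    /* lives on this task's stack */

    spin_lock_irq(&sem->lock);
    waiter.task = current;
    list_add_tail(&waiter.list, &sem->wait_list);
    spin_unlock_irq(&sem->lock);

    for (;;) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (!waiter.task)       /* waker granted us the lock */
            break;
        schedule();
    }
    __set_current_state(TASK_RUNNING);
}

The waker must clear waiter->task (after taking a task reference) before calling wake_up_process(), so a spuriously woken sleeper rechecks the field and goes back to sleep; that is the ordering __rwsem_do_wake() above is careful about.
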
rtmutex_common.h:128 struct rt_mutex_waiter *waiter,
132 struct rt_mutex_waiter *waiter);
mutex.c:172 * to waiter list and sleep. ww_mutex_set_context_fastpath()
510 struct mutex_waiter waiter; __mutex_lock_common() local
538 debug_mutex_lock_common(lock, &waiter); __mutex_lock_common()
539 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); __mutex_lock_common()
542 list_add_tail(&waiter.list, &lock->wait_list); __mutex_lock_common()
543 waiter.task = task; __mutex_lock_common()
586 mutex_remove_waiter(lock, &waiter, current_thread_info()); __mutex_lock_common()
590 debug_mutex_free_waiter(&waiter); __mutex_lock_common()
607 mutex_remove_waiter(lock, &waiter, task_thread_info(task)); __mutex_lock_common()
609 debug_mutex_free_waiter(&waiter); __mutex_lock_common()
743 struct mutex_waiter *waiter = __mutex_unlock_common_slowpath() local
747 debug_mutex_wake_waiter(lock, waiter); __mutex_unlock_common_slowpath()
749 wake_up_process(waiter->task); __mutex_unlock_common_slowpath()
mcs_spinlock.h:107 /* Pass lock to next waiter. */ mcs_spin_unlock()
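
The lone mcs_spinlock.h hit above ("Pass lock to next waiter") is the essence of an MCS lock: every CPU spins on its own queue node instead of the shared lock word, and unlock hands ownership directly to its successor. A simplified sketch with invented my_* names; the acquire/release barriers of the real mcs_spin_lock()/mcs_spin_unlock() are elided for brevity:

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <asm/processor.h>      /* cpu_relax() */

struct my_mcs_node {
    struct my_mcs_node *next;
    int locked;                 /* set to 1 when the lock is handed to us */
};

static void my_mcs_lock(struct my_mcs_node **tail, struct my_mcs_node *node)
{
    struct my_mcs_node *prev;

    node->next = NULL;
    node->locked = 0;

    prev = xchg(tail, node);    /* atomically queue at the tail */
    if (!prev)
        return;                 /* queue was empty: we own the lock */

    WRITE_ONCE(prev->next, node);
    while (!READ_ONCE(node->locked))    /* spin on our own cache line */
        cpu_relax();
}

static void my_mcs_unlock(struct my_mcs_node **tail, struct my_mcs_node *node)
{
    struct my_mcs_node *next = READ_ONCE(node->next);

    if (!next) {
        /* No visible successor: try to reset the tail to empty. */
        if (cmpxchg(tail, node, NULL) == node)
            return;
        /* A successor is racing in; wait for it to link itself. */
        while (!(next = READ_ONCE(node->next)))
            cpu_relax();
    }
    WRITE_ONCE(next->locked, 1);        /* pass lock to next waiter */
}
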
/linux-4.1.27/drivers/tty/
tty_ldsem.c:7 * 2) Write waiter has priority
121 struct ldsem_waiter *waiter, *next; __ldsem_wake_readers() local
138 list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { __ldsem_wake_readers()
139 tsk = waiter->task; __ldsem_wake_readers()
141 waiter->task = NULL; __ldsem_wake_readers()
165 struct ldsem_waiter *waiter; __ldsem_wake_writer() local
167 waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list); __ldsem_wake_writer()
168 wake_up_process(waiter->task); __ldsem_wake_writer()
202 struct ldsem_waiter waiter; down_read_failed() local
221 list_add_tail(&waiter.list, &sem->read_wait); down_read_failed()
224 waiter.task = tsk; down_read_failed()
237 if (!waiter.task) down_read_failed()
251 if (waiter.task) { down_read_failed()
253 list_del(&waiter.list); down_read_failed()
255 put_task_struct(waiter.task); down_read_failed()
270 struct ldsem_waiter waiter; down_write_failed() local
290 list_add_tail(&waiter.list, &sem->write_wait); down_write_failed()
292 waiter.task = tsk; down_write_failed()
308 list_del(&waiter.list); down_write_failed()
/linux-4.1.27/drivers/staging/android/
sync.h:171 struct sync_fence_waiter *waiter);
174 * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
184 static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, sync_fence_waiter_init() argument
187 INIT_LIST_HEAD(&waiter->work.task_list); sync_fence_waiter_init()
188 waiter->callback = callback; sync_fence_waiter_init()
304 * @waiter: waiter callback struct
309 * @waiter should be initialized with sync_fence_waiter_init().
312 struct sync_fence_waiter *waiter);
317 * @waiter: waiter callback struct
319 * returns 0 if waiter was removed from fence's async waiter list.
320 * returns -ENOENT if waiter was not found on fence's async waiter list.
323 * @waiter was never registered or if @fence has already signaled @waiter.
326 struct sync_fence_waiter *waiter);
sync_debug.c:165 struct sync_fence_waiter *waiter; sync_print_fence() local
170 waiter = container_of(pos, struct sync_fence_waiter, work); sync_print_fence()
172 seq_printf(s, "waiter %pF\n", waiter->callback); sync_print_fence()
sync.c:328 struct sync_fence_waiter *waiter) sync_fence_wait_async()
339 init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq); sync_fence_wait_async()
340 waiter->work.private = fence; sync_fence_wait_async()
345 __add_wait_queue_tail(&fence->wq, &waiter->work); sync_fence_wait_async()
356 struct sync_fence_waiter *waiter) sync_fence_cancel_async()
362 if (!list_empty(&waiter->work.task_list)) sync_fence_cancel_async()
363 list_del_init(&waiter->work.task_list); sync_fence_cancel_async()
327 sync_fence_wait_async(struct sync_fence *fence, struct sync_fence_waiter *waiter) sync_fence_wait_async() argument
355 sync_fence_cancel_async(struct sync_fence *fence, struct sync_fence_waiter *waiter) sync_fence_cancel_async() argument
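
The sync.h kerneldoc above spells out the asynchronous-waiter contract: initialize the embedded sync_fence_waiter with sync_fence_waiter_init(), queue it with sync_fence_wait_async(), and either the callback fires on signal or sync_fence_cancel_async() pulls it back off (returning -ENOENT if the callback already ran, per the comment above). A hedged usage sketch; my_ctx and its fields are invented for illustration:

#include <linux/kernel.h>       /* container_of() */
#include "sync.h"               /* drivers/staging/android/sync.h */

struct my_ctx {
    struct sync_fence_waiter waiter;    /* must outlive the wait */
    struct sync_fence *fence;
};

static void my_fence_cb(struct sync_fence *fence,
                        struct sync_fence_waiter *waiter)
{
    struct my_ctx *ctx = container_of(waiter, struct my_ctx, waiter);
    /* fence signaled: resume whatever work was parked on ctx */
}

static int my_wait_async(struct my_ctx *ctx)
{
    sync_fence_waiter_init(&ctx->waiter, my_fence_cb);
    /* < 0 on error, 0 if queued, positive if the fence already signaled */
    return sync_fence_wait_async(ctx->fence, &ctx->waiter);
}

static void my_teardown(struct my_ctx *ctx)
{
    /* -ENOENT: the callback already ran (or was never queued),
     * so the callback path owns any cleanup. */
    if (sync_fence_cancel_async(ctx->fence, &ctx->waiter) == -ENOENT)
        return;
    /* waiter successfully removed; safe to free ctx here */
}
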
/linux-4.1.27/lib/
klist.c:186 struct klist_waiter *waiter, *tmp; klist_release() local
192 list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) { klist_release()
193 if (waiter->node != n) klist_release()
196 list_del(&waiter->list); klist_release()
197 waiter->woken = 1; klist_release()
199 wake_up_process(waiter->process); klist_release()
241 struct klist_waiter waiter; klist_remove() local
243 waiter.node = n; klist_remove()
244 waiter.process = current; klist_remove()
245 waiter.woken = 0; klist_remove()
247 list_add(&waiter.list, &klist_remove_waiters); klist_remove()
254 if (waiter.woken) klist_remove()
/linux-4.1.27/drivers/gpu/drm/omapdrm/
omap_gem.c:1042 static inline bool is_waiting(struct omap_gem_sync_waiter *waiter) is_waiting() argument
1044 struct omap_gem_object *omap_obj = waiter->omap_obj; is_waiting()
1045 if ((waiter->op & OMAP_GEM_READ) && is_waiting()
1046 (omap_obj->sync->write_complete < waiter->write_target)) is_waiting()
1048 if ((waiter->op & OMAP_GEM_WRITE) && is_waiting()
1049 (omap_obj->sync->read_complete < waiter->read_target)) is_waiting()
1064 struct omap_gem_sync_waiter *waiter, *n; sync_op_update() local
1065 list_for_each_entry_safe(waiter, n, &waiters, list) { sync_op_update()
1066 if (!is_waiting(waiter)) { sync_op_update()
1067 list_del(&waiter->list); sync_op_update()
1068 SYNC("notify: %p", waiter); sync_op_update()
1069 waiter->notify(waiter->arg); sync_op_update()
1070 kfree(waiter); sync_op_update()
1150 struct omap_gem_sync_waiter *waiter = omap_gem_op_sync() local
1151 kzalloc(sizeof(*waiter), GFP_KERNEL); omap_gem_op_sync()
1153 if (!waiter) omap_gem_op_sync()
1156 waiter->omap_obj = omap_obj; omap_gem_op_sync()
1157 waiter->op = op; omap_gem_op_sync()
1158 waiter->read_target = omap_obj->sync->read_pending; omap_gem_op_sync()
1159 waiter->write_target = omap_obj->sync->write_pending; omap_gem_op_sync()
1160 waiter->notify = sync_notify; omap_gem_op_sync()
1161 waiter->arg = &waiter_task; omap_gem_op_sync()
1164 if (is_waiting(waiter)) { omap_gem_op_sync()
1165 SYNC("waited: %p", waiter); omap_gem_op_sync()
1166 list_add_tail(&waiter->list, &waiters); omap_gem_op_sync()
1172 SYNC("interrupted: %p", waiter); omap_gem_op_sync()
1174 list_del(&waiter->list); omap_gem_op_sync()
1178 waiter = NULL; omap_gem_op_sync()
1182 kfree(waiter); omap_gem_op_sync()
1201 struct omap_gem_sync_waiter *waiter = omap_gem_op_async() local
1202 kzalloc(sizeof(*waiter), GFP_ATOMIC); omap_gem_op_async()
1204 if (!waiter) omap_gem_op_async()
1207 waiter->omap_obj = omap_obj; omap_gem_op_async()
1208 waiter->op = op; omap_gem_op_async()
1209 waiter->read_target = omap_obj->sync->read_pending; omap_gem_op_async()
1210 waiter->write_target = omap_obj->sync->write_pending; omap_gem_op_async()
1211 waiter->notify = fxn; omap_gem_op_async()
1212 waiter->arg = arg; omap_gem_op_async()
1215 if (is_waiting(waiter)) { omap_gem_op_async()
1216 SYNC("waited: %p", waiter); omap_gem_op_async()
1217 list_add_tail(&waiter->list, &waiters); omap_gem_op_async()
1224 kfree(waiter); omap_gem_op_async()
/linux-4.1.27/drivers/net/wireless/iwlwifi/
iwl-notif-wait.h:82 * the waiter stays blocked. If no function is given, any
83 * of the listed commands will unblock the waiter.
86 * @triggered: waiter should be woken up
/linux-4.1.27/drivers/md/persistent-data/
dm-block-manager.c:49 struct waiter { struct
127 static void __wait(struct waiter *w) __wait()
141 static void __wake_waiter(struct waiter *w) __wake_waiter()
157 struct waiter *w, *tmp; __wake_many()
201 struct waiter w; bl_down_read()
264 struct waiter w; bl_down_write()
/linux-4.1.27/kernel/
futex.c:78 * The waiter reads the futex value in user space and calls
92 * optimization to work, ordering guarantees must exist so that the waiter
112 * This would cause the waiter on CPU 0 to wait forever because it
114 * and the waker did not find the waiter in the hash bucket queue.
116 * The correct serialization ensures that a waiter either observes
161 * Note that a new waiter is accounted for in (a) even when it is possible that
169 * will do the additional required waiter count housekeeping. This is done for
273 * Reflects a new waiter being added to the waitqueue.
287 * Reflects a waiter being removed from the waitqueue by wakeup
349 * is because we need the barrier for the lockless waiter check. get_futex_key_refs()
572 * futex_top_waiter() - Return the highest priority waiter on a futex
787 * [3] Invalid. The waiter is queued on a non PI futex
811 * Validate that the existing waiter has a pi_state and sanity check
834 * topmost waiter. The task which acquires the attach_to_pi_state()
852 * yet executed exit_pi_state_list() or some waiter attach_to_pi_state()
894 * We are the first waiter - try to look up the real owner and attach attach_to_pi_owner()
929 * No existing pi state. First waiter. [2] attach_to_pi_owner()
960 * If there is a waiter on that futex, validate it and lookup_pi_state()
967 * We are the first waiter - try to look up the owner based on lookup_pi_state()
1033 * No waiter and user TID is 0. We are here because the futex_lock_pi_atomic()
1056 * First waiter. Set the waiters bit before attaching ourself to futex_lock_pi_atomic()
1146 * It is possible that the next waiter (the one that brought wake_futex_pi()
1165 * try the TID->0 transition) raced with a waiter setting the wake_futex_pi()
1400 * to the requeue target futex so the waiter can detect the wakeup on the right
1424 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1433 * Try and get the lock on behalf of the top waiter if we can do it atomically.
1434 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1458 * If the caller intends to requeue more than 1 waiter to pifutex, futex_proxy_trylock_atomic()
1501 * uaddr2 atomically on behalf of the top waiter.
1598 * Attempt to acquire uaddr2 and wake the top waiter. If we futex_requeue()
1611 * vpid of the top waiter task. futex_requeue()
1620 * cannot be changed by the top waiter as it futex_requeue()
1707 /* Prepare the waiter to take the rt_mutex. */ futex_requeue()
1905 * previous highest priority waiter or we are the highest priority fixup_pi_state_owner()
1906 * waiter but failed to get the rtmutex the first time. fixup_pi_state_owner()
1956 * waiter itself or the task which stole the rtmutex) the fixup_pi_state_owner()
2125 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); futex_wait_setup()
2433 * An unconditional UNLOCK_PI op raced against a waiter futex_unlock_pi()
2593 * The waiter is allocated on our stack, manipulated by the requeue futex_wait_requeue_pi()
2727 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2818 * set, wake up a waiter (if any). (We have to do a handle_futex_death()
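
The futex.c comments above describe the waiter/waker contract: the waiter reads the futex word in user space and asks the kernel to sleep only if the word still holds that value, so a wakeup racing between the read and the sleep cannot be lost. A minimal user-space sketch of both halves, following the "val = var; if (cond(val)) futex_wait(&var, val)" shape quoted at line 2125:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>

static long futex(atomic_int *uaddr, int op, int val)
{
    /* glibc provides no futex(2) wrapper; raw syscall. */
    return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void wait_for_flag(atomic_int *flag)     /* the waiter */
{
    while (atomic_load(flag) == 0) {
        /* Sleeps only if *flag is still 0 inside the kernel;
         * fails with EAGAIN if the waker already stored 1. */
        futex(flag, FUTEX_WAIT, 0);
    }
}

static void set_flag(atomic_int *flag)          /* the waker */
{
    atomic_store(flag, 1);          /* publish the value first... */
    futex(flag, FUTEX_WAKE, 1);     /* ...then wake one waiter */
}

The in-kernel hash-bucket ordering quoted above (around lines 92-116) exists precisely to make this user-space recheck sufficient.
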
kmod.c:552 * Set the completion pointer only if there is a waiter. call_usermodehelper_exec()
/linux-4.1.27/include/crypto/
mcryptd.h:56 struct list_head waiter; member in struct:mcryptd_hash_request_ctx
/linux-4.1.27/drivers/gpu/drm/i915/
intel_ringbuffer.c:1127 struct intel_engine_cs *waiter; gen8_rcs_signal() local
1138 for_each_ring(waiter, dev_priv, i) { for_each_ring()
1155 MI_SEMAPHORE_TARGET(waiter->id)); for_each_ring()
1168 struct intel_engine_cs *waiter; gen8_xcs_signal() local
1179 for_each_ring(waiter, dev_priv, i) { for_each_ring()
1194 MI_SEMAPHORE_TARGET(waiter->id)); for_each_ring()
1276 * intel_ring_sync - sync the waiter to the signaller on seqno
1278 * @waiter - ring that is waiting
1280 * @seqno - seqno which the waiter will block on
1284 gen8_ring_sync(struct intel_engine_cs *waiter, gen8_ring_sync() argument
1288 struct drm_i915_private *dev_priv = waiter->dev->dev_private; gen8_ring_sync()
1291 ret = intel_ring_begin(waiter, 4); gen8_ring_sync()
1295 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | gen8_ring_sync()
1299 intel_ring_emit(waiter, seqno); gen8_ring_sync()
1300 intel_ring_emit(waiter, gen8_ring_sync()
1301 lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); gen8_ring_sync()
1302 intel_ring_emit(waiter, gen8_ring_sync()
1303 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); gen8_ring_sync()
1304 intel_ring_advance(waiter); gen8_ring_sync()
1309 gen6_ring_sync(struct intel_engine_cs *waiter, gen6_ring_sync() argument
1316 u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id]; gen6_ring_sync()
1327 ret = intel_ring_begin(waiter, 4); gen6_ring_sync()
1332 if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { gen6_ring_sync()
1333 intel_ring_emit(waiter, dw1 | wait_mbox); gen6_ring_sync()
1334 intel_ring_emit(waiter, seqno); gen6_ring_sync()
1335 intel_ring_emit(waiter, 0); gen6_ring_sync()
1336 intel_ring_emit(waiter, MI_NOOP); gen6_ring_sync()
1338 intel_ring_emit(waiter, MI_NOOP); gen6_ring_sync()
1339 intel_ring_emit(waiter, MI_NOOP); gen6_ring_sync()
1340 intel_ring_emit(waiter, MI_NOOP); gen6_ring_sync()
1341 intel_ring_emit(waiter, MI_NOOP); gen6_ring_sync()
1343 intel_ring_advance(waiter); gen6_ring_sync()
i915_drv.h:1274 * waiter and the gpu reset work code.
/linux-4.1.27/fs/
locks.c:588 static void locks_insert_global_blocked(struct file_lock *waiter) locks_insert_global_blocked() argument
592 hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter)); locks_insert_global_blocked()
595 static void locks_delete_global_blocked(struct file_lock *waiter) locks_delete_global_blocked() argument
599 hash_del(&waiter->fl_link); locks_delete_global_blocked()
602 /* Remove waiter from blocker's block list.
607 static void __locks_delete_block(struct file_lock *waiter) __locks_delete_block() argument
609 locks_delete_global_blocked(waiter); __locks_delete_block()
610 list_del_init(&waiter->fl_block); __locks_delete_block()
611 waiter->fl_next = NULL; __locks_delete_block()
614 static void locks_delete_block(struct file_lock *waiter) locks_delete_block() argument
617 __locks_delete_block(waiter); locks_delete_block()
621 /* Insert waiter into blocker's block list.
632 struct file_lock *waiter) __locks_insert_block()
634 BUG_ON(!list_empty(&waiter->fl_block)); __locks_insert_block()
635 waiter->fl_next = blocker; __locks_insert_block()
636 list_add_tail(&waiter->fl_block, &blocker->fl_block); __locks_insert_block()
638 locks_insert_global_blocked(waiter); __locks_insert_block()
643 struct file_lock *waiter) locks_insert_block()
646 __locks_insert_block(blocker, waiter); locks_insert_block()
669 struct file_lock *waiter; locks_wake_up_blocks() local
671 waiter = list_first_entry(&blocker->fl_block, locks_wake_up_blocks()
673 __locks_delete_block(waiter); locks_wake_up_blocks()
674 if (waiter->fl_lmops && waiter->fl_lmops->lm_notify) locks_wake_up_blocks()
675 waiter->fl_lmops->lm_notify(waiter); locks_wake_up_blocks()
677 wake_up(&waiter->fl_wait); locks_wake_up_blocks()
2465 * @waiter: the lock which was waiting
2470 posix_unblock_lock(struct file_lock *waiter) posix_unblock_lock() argument
2475 if (waiter->fl_next) posix_unblock_lock()
2476 __locks_delete_block(waiter); posix_unblock_lock()
631 __locks_insert_block(struct file_lock *blocker, struct file_lock *waiter) __locks_insert_block() argument
642 locks_insert_block(struct file_lock *blocker, struct file_lock *waiter) locks_insert_block() argument
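
locks.c above maintains a two-way blocker/waiter linkage: the waiter's fl_next points at its blocker, the waiter sits on the blocker's fl_block list, and __locks_delete_block() severs both links, which is why posix_unblock_lock() can use fl_next as the "still blocked?" test. A stripped-down sketch of that shape; my_flock and its fields are generic stand-ins (the real struct file_lock overloads a single fl_block list_head for both the head and the entry role, split here for clarity):

#include <linux/list.h>

struct my_flock {
    struct my_flock *next;          /* blocker we wait behind, or NULL */
    struct list_head blocked;       /* waiters queued behind this lock */
    struct list_head block_link;    /* our entry on next->blocked */
};

static void my_insert_block(struct my_flock *blocker, struct my_flock *waiter)
{
    waiter->next = blocker;
    list_add_tail(&waiter->block_link, &blocker->blocked);
}

static void my_delete_block(struct my_flock *waiter)
{
    list_del_init(&waiter->block_link);
    waiter->next = NULL;            /* marks "no longer blocked" */
}

static void my_wake_up_blocks(struct my_flock *blocker)
{
    struct my_flock *waiter;

    while (!list_empty(&blocker->blocked)) {
        waiter = list_first_entry(&blocker->blocked,
                                  struct my_flock, block_link);
        my_delete_block(waiter);
        /* the real code then calls lm_notify() or wake_up(&fl_wait) */
    }
}
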
direct-io.c:126 struct task_struct *waiter; /* waiting task (NULL if none) */ member in struct:dio
299 if (remaining == 1 && dio->waiter) dio_bio_end_aio()
300 wake_up_process(dio->waiter); dio_bio_end_aio()
329 if (--dio->refcount == 1 && dio->waiter) dio_bio_end_io()
330 wake_up_process(dio->waiter); dio_bio_end_io()
440 dio->waiter = current; dio_await_one()
445 dio->waiter = NULL; dio_await_one()
/linux-4.1.27/kernel/sched/
wait.c:262 * @mode: runstate of the waiter to be woken
269 * Wakes up the next waiter if the caller is concurrently
272 * This prevents waiter starvation where an exclusive waiter
274 * the next waiter.
464 * wake_up_bit - wake up a waiter on a bit
570 * wake_up_atomic_t - Wake up a waiter on an atomic_t
576 * check is done by the waiter's wake function, not by the waker itself). wait.c
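
The wait.c hits around wake_up_bit() document the bit-waiter idiom: a single flag bit is both the state and the wait condition, and the waker must clear the bit, issue a barrier, and only then wake, so the sleeper's recheck is guaranteed to see the new value. A short sketch of the canonical pair; MY_BUSY is a hypothetical bit in some flags word:

#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#define MY_BUSY 0       /* hypothetical bit number in obj->flags */

static int my_wait_idle(unsigned long *flags)
{
    /* Sleeps until MY_BUSY is clear; nonzero if a signal interrupted. */
    return wait_on_bit(flags, MY_BUSY, TASK_INTERRUPTIBLE);
}

static void my_mark_idle(unsigned long *flags)
{
    clear_bit(MY_BUSY, flags);
    smp_mb__after_atomic(); /* clear must be visible before the wakeup */
    wake_up_bit(flags, MY_BUSY);
}
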
deadline.c:902 * Use the scheduling parameters of the top pi-waiter enqueue_task_dl()
/linux-4.1.27/include/linux/
rtmutex.h:26 * @waiters_leftmost: top waiter
pagemap.h:532 * Add an arbitrary waiter to a page's wait queue
534 extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
fs.h:1137 static inline int posix_unblock_lock(struct file_lock *waiter) posix_unblock_lock() argument
/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2_intr.c:140 * wakeup the waiter, we just won't give him the msg. handle_vq()
141 * It is assumed the waiter will deal with this... handle_vq()
164 * wakeup the waiter. handle_vq()
/linux-4.1.27/arch/x86/crypto/sha-mb/
sha1_mb.c:436 list_del(&rctx->waiter); sha_complete_job()
454 list_del(&req_ctx->waiter); sha_complete_job()
485 list_add_tail(&rctx->waiter, &cstate->work_list); sha1_mb_add_list()
833 struct mcryptd_hash_request_ctx, waiter); sha1_mb_flusher()
850 struct mcryptd_hash_request_ctx, waiter); sha1_mb_flusher()
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
lu_object.c:539 wait_queue_t *waiter, htable_lookup()
574 init_waitqueue_entry(waiter, current); htable_lookup()
575 add_wait_queue(&bkt->lsb_marche_funebre, waiter); htable_lookup()
624 wait_queue_t *waiter) lu_object_find_try()
650 * If dying object is found during index search, add @waiter to the lu_object_find_try()
659 o = htable_lookup(s, &bd, f, waiter, &version); lu_object_find_try()
676 shadow = htable_lookup(s, &bd, f, waiter, &version); lu_object_find_try()
712 * lu_object_find_try() already added waiter into the lu_object_find_at()
2078 wait_queue_t waiter; lu_object_assign_fid() local
2087 shadow = htable_lookup(s, &bd, fid, &waiter, &version); lu_object_assign_fid()
536 htable_lookup(struct lu_site *s, struct cfs_hash_bd *bd, const struct lu_fid *f, wait_queue_t *waiter, __u64 *version) htable_lookup() argument
620 lu_object_find_try(const struct lu_env *env, struct lu_device *dev, const struct lu_fid *f, const struct lu_object_conf *conf, wait_queue_t *waiter) lu_object_find_try() argument
cl_lock.c:925 wait_queue_t waiter; cl_lock_state_wait() local
942 init_waitqueue_entry(&waiter, current); cl_lock_state_wait()
943 add_wait_queue(&lock->cll_wq, &waiter); cl_lock_state_wait()
960 remove_wait_queue(&lock->cll_wq, &waiter); cl_lock_state_wait()
/linux-4.1.27/drivers/staging/lustre/lustre/lov/
lov_object.c:298 wait_queue_t *waiter; lov_subobject_kill() local
315 waiter = &lov_env_info(env)->lti_waiter; lov_subobject_kill()
316 init_waitqueue_entry(waiter, current); lov_subobject_kill()
317 add_wait_queue(&bkt->lsb_marche_funebre, waiter); lov_subobject_kill()
333 remove_wait_queue(&bkt->lsb_marche_funebre, waiter); lov_subobject_kill()
/linux-4.1.27/drivers/staging/lustre/lustre/lclient/
lcommon_cl.c:1160 wait_queue_t waiter; cl_object_put_last() local
1168 init_waitqueue_entry(&waiter, current); cl_object_put_last()
1169 add_wait_queue(&bkt->lsb_marche_funebre, &waiter); cl_object_put_last()
1179 remove_wait_queue(&bkt->lsb_marche_funebre, &waiter); cl_object_put_last()
/linux-4.1.27/drivers/block/zram/
zcomp.c:137 /* add stream back to idle list and wake up waiter or free the stream */ zcomp_strm_multi_release()
/linux-4.1.27/drivers/staging/dgnc/
dgnc_driver.c:662 * waiter needs to be woken up, and (b) whether the poller needs to
/linux-4.1.27/drivers/uwb/
whc-rc.c:418 * host is quiescing and up it (so it will chain to the next waiter).
/linux-4.1.27/fs/dlm/
plock.c:77 abandoned waiter. So, we have to insert the unlock-close when the
lock.c:5139 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d " dlm_recover_waiters_pre()
5291 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d " dlm_recover_waiters_post()
5348 log_error(ls, "waiter %x msg %d r_nodeid %d " dlm_recover_waiters_post()
/linux-4.1.27/tools/testing/selftests/memfd/
memfd_test.c:498 /* dummy waiter; SIGTERM terminates us anyway */ idle_thread_fn()
/linux-4.1.27/drivers/block/
nvme-core.c:2427 struct task_struct *waiter; member in struct:nvme_delq_ctx
2434 dq->waiter = current; nvme_wait_dq()
2464 if (dq->waiter) nvme_put_dq()
2465 wake_up_process(dq->waiter); nvme_put_dq()
2549 dq.waiter = NULL; nvme_disable_io_queues()
/linux-4.1.27/mm/
filemap.c:714 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
716 * @waiter: Waiter to add to the queue
718 * Add an arbitrary @waiter to the wait queue for the nominated @page.
720 void add_page_wait_queue(struct page *page, wait_queue_t *waiter) add_page_wait_queue() argument
726 __add_wait_queue(q, waiter); add_page_wait_queue()
memcontrol.c:232 * waiter for changes related to this event. Use eventfd_signal()
/linux-4.1.27/drivers/gpu/drm/rcar-du/
rcar_du_kms.c:416 /* Complete the commit, wake up any waiter. */ rcar_du_atomic_complete()
/linux-4.1.27/drivers/net/wimax/i2400m/
rx.c:304 /* Check waiter didn't time out waiting for the answer... */ i2400m_rx_ctl_ack()
307 d_printf(1, dev, "Huh? waiter for command reply cancelled\n"); i2400m_rx_ctl_ack()
/linux-4.1.27/fs/xfs/
xfs_log_cil.c:489 * Hence the waiter will either find the commit sequence on the xlog_cil_push()
492 * so the waiter will have to continue trying to check the CIL xlog_cil_push()
/linux-4.1.27/fs/fuse/
dev.c:392 /* Wake up next waiter, if any */
2102 * waiter is woken up. This will make request_wait_answer() wait
/linux-4.1.27/drivers/s390/block/
dasd_devmap.c:634 * Reference counter dropped to zero. Wake up waiter
/linux-4.1.27/drivers/staging/lustre/lustre/ldlm/
ldlm_flock.c:608 /* Need to wake up the waiter if we were evicted */ ldlm_flock_completion_ast()
/linux-4.1.27/drivers/gpu/drm/radeon/
radeon_fence.c:923 * radeon_fence_driver_force_completion - force all fence waiters to complete
/linux-4.1.27/drivers/gpu/drm/vmwgfx/
vmwgfx_fence.c:97 * irq is received. When the last fence waiter is gone, that IRQ is masked
/linux-4.1.27/fs/afs/
internal.h:61 /* synchronous call waiter and call dispatched notification */
/linux-4.1.27/fs/gfs2/
log.c:335 * wake the next waiter on the list.
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
osc_cache.c:1556 * Adding a cache waiter will trigger urgent write-out no matter what osc_enter_cache()
2501 * this is called when a sync waiter receives an interruption. Its job is to
/linux-4.1.27/fs/ocfs2/dlm/
dlmrecovery.c:1774 * up-to-date, and the change will be ordered properly for the waiter.
1775 * We will *not* attempt to modify the lock underneath the waiter.
/linux-4.1.27/drivers/scsi/bnx2fc/
bnx2fc_io.c:1612 BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n"); bnx2fc_process_tm_compl()
/linux-4.1.27/net/vmw_vsock/
vmci_transport.c:1304 /* Signify the socket is connected and wakeup the waiter in vmci_transport_recv_connecting_client()
/linux-4.1.27/drivers/scsi/lpfc/
lpfc_bsg.c:54 /* Event type and waiter identifiers */
1241 "waiter\n"); lpfc_bsg_hba_set_event()
/linux-4.1.27/block/
blk-core.c:414 * With queue marked dead, any woken up waiter will fail the
/linux-4.1.27/net/ceph/
osd_client.c:1733 complete_all(&req->r_safe_completion); /* fsync waiter */ complete_request()
/linux-4.1.27/drivers/staging/dgap/
dgap.c:2287 * waiter needs to be woken up, and (b) whether the poller needs to
