waiter 240 drivers/gpu/drm/amd/amdkfd/kfd_events.c struct kfd_event_waiter *waiter;
waiter 243 drivers/gpu/drm/amd/amdkfd/kfd_events.c list_for_each_entry(waiter, &ev->wq.head, wait.entry)
waiter 244 drivers/gpu/drm/amd/amdkfd/kfd_events.c waiter->event = NULL;
waiter 391 drivers/gpu/drm/amd/amdkfd/kfd_events.c struct kfd_event_waiter *waiter;
waiter 400 drivers/gpu/drm/amd/amdkfd/kfd_events.c list_for_each_entry(waiter, &ev->wq.head, wait.entry)
waiter 401 drivers/gpu/drm/amd/amdkfd/kfd_events.c waiter->activated = true;
waiter 545 drivers/gpu/drm/amd/amdkfd/kfd_events.c struct kfd_event_waiter *waiter,
waiter 553 drivers/gpu/drm/amd/amdkfd/kfd_events.c waiter->event = ev;
waiter 554 drivers/gpu/drm/amd/amdkfd/kfd_events.c waiter->activated = ev->signaled;
waiter 560 drivers/gpu/drm/amd/amdkfd/kfd_events.c static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
waiter 562 drivers/gpu/drm/amd/amdkfd/kfd_events.c struct kfd_event *ev = waiter->event;
waiter 567 drivers/gpu/drm/amd/amdkfd/kfd_events.c if (!waiter->activated)
waiter 568 drivers/gpu/drm/amd/amdkfd/kfd_events.c add_wait_queue(&ev->wq, &waiter->wait);
waiter 613 drivers/gpu/drm/amd/amdkfd/kfd_events.c struct kfd_event_waiter *waiter;
waiter 618 drivers/gpu/drm/amd/amdkfd/kfd_events.c waiter = &event_waiters[i];
waiter 619 drivers/gpu/drm/amd/amdkfd/kfd_events.c event = waiter->event;
waiter 620 drivers/gpu/drm/amd/amdkfd/kfd_events.c if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
waiter 960 drivers/gpu/drm/i915/gt/intel_lrc.c container_of(p->waiter, typeof(*w), sched);
waiter 422 drivers/gpu/drm/i915/i915_scheduler.c dep->waiter = node;
waiter 65 drivers/gpu/drm/i915/i915_scheduler_types.h struct i915_sched_node *waiter;
waiter 36 drivers/gpu/host1x/intr.c static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
waiter 40 drivers/gpu/host1x/intr.c u32 thresh = waiter->thresh;
waiter 44 drivers/gpu/host1x/intr.c list_add(&waiter->list, &pos->list);
waiter 48 drivers/gpu/host1x/intr.c list_add(&waiter->list, queue);
waiter 60 drivers/gpu/host1x/intr.c struct host1x_waitlist *waiter, *next, *prev;
waiter 62 drivers/gpu/host1x/intr.c list_for_each_entry_safe(waiter, next, head, list) {
waiter 63 drivers/gpu/host1x/intr.c if ((s32)(waiter->thresh - sync) > 0)
waiter 66 drivers/gpu/host1x/intr.c dest = completed + waiter->action;
waiter 69 drivers/gpu/host1x/intr.c if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
waiter 73 drivers/gpu/host1x/intr.c if (prev->data == waiter->data) {
waiter 80 drivers/gpu/host1x/intr.c if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
waiter 81 drivers/gpu/host1x/intr.c list_del(&waiter->list);
waiter 82 drivers/gpu/host1x/intr.c kref_put(&waiter->refcount, waiter_release);
waiter 84 drivers/gpu/host1x/intr.c list_move_tail(&waiter->list, dest);
waiter 99 drivers/gpu/host1x/intr.c static void action_submit_complete(struct host1x_waitlist *waiter)
waiter 101 drivers/gpu/host1x/intr.c struct host1x_channel *channel = waiter->data;
waiter 107 drivers/gpu/host1x/intr.c waiter->count, waiter->thresh);
waiter 111 drivers/gpu/host1x/intr.c static void action_wakeup(struct host1x_waitlist *waiter)
waiter 113 drivers/gpu/host1x/intr.c wait_queue_head_t *wq = waiter->data;
waiter 118 drivers/gpu/host1x/intr.c static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
waiter 120 drivers/gpu/host1x/intr.c wait_queue_head_t *wq = waiter->data;
waiter 125 drivers/gpu/host1x/intr.c typedef void (*action_handler)(struct host1x_waitlist *waiter);
waiter 140 drivers/gpu/host1x/intr.c struct host1x_waitlist *waiter, *next;
waiter 142 drivers/gpu/host1x/intr.c list_for_each_entry_safe(waiter, next, head, list) {
waiter 143 drivers/gpu/host1x/intr.c list_del(&waiter->list);
waiter 144 drivers/gpu/host1x/intr.c handler(waiter);
waiter 145 drivers/gpu/host1x/intr.c WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
waiter 147 drivers/gpu/host1x/intr.c kref_put(&waiter->refcount, waiter_release);
waiter 205 drivers/gpu/host1x/intr.c void *data, struct host1x_waitlist *waiter,
waiter 210 drivers/gpu/host1x/intr.c if (waiter == NULL) {
waiter 216 drivers/gpu/host1x/intr.c INIT_LIST_HEAD(&waiter->list);
waiter 217 drivers/gpu/host1x/intr.c kref_init(&waiter->refcount);
waiter 219 drivers/gpu/host1x/intr.c kref_get(&waiter->refcount);
waiter 220 drivers/gpu/host1x/intr.c waiter->thresh = thresh;
waiter 221 drivers/gpu/host1x/intr.c waiter->action = action;
waiter 222 drivers/gpu/host1x/intr.c atomic_set(&waiter->state, WLS_PENDING);
waiter 223 drivers/gpu/host1x/intr.c waiter->data = data;
waiter 224 drivers/gpu/host1x/intr.c waiter->count = 1;
waiter 230 drivers/gpu/host1x/intr.c if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
waiter 242 drivers/gpu/host1x/intr.c *ref = waiter;
waiter 248 drivers/gpu/host1x/intr.c struct host1x_waitlist *waiter = ref;
waiter 251 drivers/gpu/host1x/intr.c while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
waiter 259 drivers/gpu/host1x/intr.c kref_put(&waiter->refcount, waiter_release);
waiter 316 drivers/gpu/host1x/intr.c struct host1x_waitlist *waiter, *next;
waiter 318 drivers/gpu/host1x/intr.c list_for_each_entry_safe(waiter, next,
waiter 320 drivers/gpu/host1x/intr.c if (atomic_cmpxchg(&waiter->state,
waiter 322 drivers/gpu/host1x/intr.c list_del(&waiter->list);
waiter 323 drivers/gpu/host1x/intr.c kref_put(&waiter->refcount, waiter_release);
waiter 70 drivers/gpu/host1x/intr.h void *data, struct host1x_waitlist *waiter,
waiter 210 drivers/gpu/host1x/syncpt.c struct host1x_waitlist *waiter;
waiter 240 drivers/gpu/host1x/syncpt.c waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
waiter 241 drivers/gpu/host1x/syncpt.c if (!waiter) {
waiter 249 drivers/gpu/host1x/syncpt.c &wq, waiter, &ref);
waiter 120 drivers/md/persistent-data/dm-block-manager.c static void __wait(struct waiter *w)
waiter 134 drivers/md/persistent-data/dm-block-manager.c static void __wake_waiter(struct waiter *w)
waiter 150 drivers/md/persistent-data/dm-block-manager.c struct waiter *w, *tmp;
waiter 194 drivers/md/persistent-data/dm-block-manager.c struct waiter w;
waiter 257 drivers/md/persistent-data/dm-block-manager.c struct waiter w;
waiter 253 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c struct bulk_waiter_node *waiter, *next;
waiter 255 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c list_for_each_entry_safe(waiter, next,
waiter 257 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c list_del(&waiter->list);
waiter 260 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter, waiter->pid);
waiter 261 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c kfree(waiter);
waiter 439 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c struct bulk_waiter_node *waiter = NULL;
waiter 450 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
waiter 451 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (waiter->pid == current->pid) {
waiter 452 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c list_del(&waiter->list);
waiter 458 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (waiter) {
waiter 459 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
waiter 476 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (!waiter) {
waiter 477 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
waiter 478 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (!waiter) {
waiter 485 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c status = vchiq_bulk_transfer(handle, data, size, &waiter->bulk_waiter,
waiter 488 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c !waiter->bulk_waiter.bulk) {
waiter 489 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
waiter 499 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c kfree(waiter);
waiter 501 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter->pid = current->pid;
waiter 503 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c list_add(&waiter->list, &instance->bulk_waiter_list);
waiter 507 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter, current->pid);
waiter 1043 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c struct bulk_waiter_node *waiter = NULL;
waiter 1062 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter = kzalloc(sizeof(struct bulk_waiter_node),
waiter 1064 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (!waiter) {
waiter 1069 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c args.userdata = &waiter->bulk_waiter;
waiter 1072 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c list_for_each_entry(waiter, &instance->bulk_waiter_list,
waiter 1074 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (waiter->pid == current->pid) {
waiter 1075 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c list_del(&waiter->list);
waiter 1080 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (!waiter) {
waiter 1088 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c "found bulk_waiter %pK for pid %d", waiter,
waiter 1090 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c args.userdata = &waiter->bulk_waiter;
waiter 1096 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (!waiter)
waiter 1100 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c !waiter->bulk_waiter.bulk) {
waiter 1101 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c if (waiter->bulk_waiter.bulk) {
waiter 1105 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter->bulk_waiter.bulk->userdata = NULL;
waiter 1108 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c kfree(waiter);
waiter 1112 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter->pid = current->pid;
waiter 1114 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c list_add(&waiter->list, &instance->bulk_waiter_list);
waiter 1118 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter, current->pid);
waiter 2048 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c struct bulk_waiter_node *waiter, *next;
waiter 2050 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c list_for_each_entry_safe(waiter, next,
waiter 2052 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c list_del(&waiter->list);
waiter 2055 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c waiter, waiter->pid);
waiter 2056 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c kfree(waiter);
waiter 1222 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c struct bulk_waiter *waiter;
waiter 1225 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c waiter = bulk->userdata;
waiter 1226 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c if (waiter) {
waiter 1227 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c waiter->actual = bulk->actual;
waiter 1228 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c complete(&waiter->event);
waiter 76 drivers/tty/tty_ldsem.c struct ldsem_waiter *waiter, *next;
waiter 94 drivers/tty/tty_ldsem.c list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
waiter 95 drivers/tty/tty_ldsem.c tsk = waiter->task;
waiter 96 drivers/tty/tty_ldsem.c smp_store_release(&waiter->task, NULL);
waiter 121 drivers/tty/tty_ldsem.c struct ldsem_waiter *waiter;
waiter 123 drivers/tty/tty_ldsem.c waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
waiter 124 drivers/tty/tty_ldsem.c wake_up_process(waiter->task);
waiter 158 drivers/tty/tty_ldsem.c struct ldsem_waiter waiter;
waiter 180 drivers/tty/tty_ldsem.c list_add_tail(&waiter.list, &sem->read_wait);
waiter 183 drivers/tty/tty_ldsem.c waiter.task = current;
waiter 196 drivers/tty/tty_ldsem.c if (!smp_load_acquire(&waiter.task))
waiter 212 drivers/tty/tty_ldsem.c if (waiter.task) {
waiter 215 drivers/tty/tty_ldsem.c list_del(&waiter.list);
waiter 217 drivers/tty/tty_ldsem.c put_task_struct(waiter.task);
waiter 232 drivers/tty/tty_ldsem.c struct ldsem_waiter waiter;
waiter 253 drivers/tty/tty_ldsem.c list_add_tail(&waiter.list, &sem->write_wait);
waiter 255 drivers/tty/tty_ldsem.c waiter.task = current;
waiter 272 drivers/tty/tty_ldsem.c list_del(&waiter.list);
waiter 196 fs/block_dev.c struct task_struct *waiter = bio->bi_private;
waiter 199 fs/block_dev.c blk_wake_io_task(waiter);
waiter 279 fs/block_dev.c struct task_struct *waiter;
waiter 323 fs/block_dev.c struct task_struct *waiter = dio->waiter;
waiter 325 fs/block_dev.c WRITE_ONCE(dio->waiter, NULL);
waiter 326 fs/block_dev.c blk_wake_io_task(waiter);
waiter 362 fs/block_dev.c dio->waiter = current;
waiter 448 fs/block_dev.c if (!READ_ONCE(dio->waiter))
waiter 137 fs/direct-io.c struct task_struct *waiter; /* waiting task (NULL if none) */
waiter 361 fs/direct-io.c if (remaining == 1 && dio->waiter)
waiter 362 fs/direct-io.c wake_up_process(dio->waiter);
waiter 403 fs/direct-io.c if (--dio->refcount == 1 && dio->waiter)
waiter 404 fs/direct-io.c wake_up_process(dio->waiter);
waiter 518 fs/direct-io.c dio->waiter = current;
waiter 525 fs/direct-io.c dio->waiter = NULL;
waiter 39 fs/iomap/direct-io.c struct task_struct *waiter;
waiter 156 fs/iomap/direct-io.c struct task_struct *waiter = dio->submit.waiter;
waiter 157 fs/iomap/direct-io.c WRITE_ONCE(dio->submit.waiter, NULL);
waiter 158 fs/iomap/direct-io.c blk_wake_io_task(waiter);
waiter 427 fs/iomap/direct-io.c dio->submit.waiter = current;
waiter 549 fs/iomap/direct-io.c if (!READ_ONCE(dio->submit.waiter))
waiter 705 fs/locks.c static void locks_insert_global_blocked(struct file_lock *waiter)
waiter 709 fs/locks.c hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
waiter 712 fs/locks.c static void locks_delete_global_blocked(struct file_lock *waiter)
waiter 716 fs/locks.c hash_del(&waiter->fl_link);
waiter 724 fs/locks.c static void __locks_delete_block(struct file_lock *waiter)
waiter 726 fs/locks.c locks_delete_global_blocked(waiter);
waiter 727 fs/locks.c list_del_init(&waiter->fl_blocked_member);
waiter 733 fs/locks.c struct file_lock *waiter;
waiter 735 fs/locks.c waiter = list_first_entry(&blocker->fl_blocked_requests,
waiter 737 fs/locks.c __locks_delete_block(waiter);
waiter 738 fs/locks.c if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
waiter 739 fs/locks.c waiter->fl_lmops->lm_notify(waiter);
waiter 741 fs/locks.c wake_up(&waiter->fl_wait);
waiter 748 fs/locks.c smp_store_release(&waiter->fl_blocker, NULL);
waiter 758 fs/locks.c int locks_delete_block(struct file_lock *waiter)
waiter 783 fs/locks.c if (!smp_load_acquire(&waiter->fl_blocker) &&
waiter 784 fs/locks.c list_empty(&waiter->fl_blocked_requests))
waiter 788 fs/locks.c if (waiter->fl_blocker)
waiter 790 fs/locks.c __locks_wake_up_blocks(waiter);
waiter 791 fs/locks.c __locks_delete_block(waiter);
waiter 797 fs/locks.c smp_store_release(&waiter->fl_blocker, NULL);
waiter 819 fs/locks.c struct file_lock *waiter,
waiter 824 fs/locks.c BUG_ON(!list_empty(&waiter->fl_blocked_member));
waiter 828 fs/locks.c if (conflict(fl, waiter)) {
waiter 832 fs/locks.c waiter->fl_blocker = blocker;
waiter 833 fs/locks.c list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
waiter 835 fs/locks.c locks_insert_global_blocked(waiter);
waiter 841 fs/locks.c __locks_wake_up_blocks(waiter);
waiter 846 fs/locks.c struct file_lock *waiter,
waiter 851 fs/locks.c __locks_insert_block(blocker, waiter, conflict);
waiter 7084 fs/nfs/nfs4proc.c struct nfs4_lock_waiter *waiter = wait->private;
waiter 7090 fs/nfs/nfs4proc.c *wowner = waiter->owner;
waiter 7097 fs/nfs/nfs4proc.c if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
waiter 7102 fs/nfs/nfs4proc.c wait->private = waiter->task;
waiter 7106 fs/nfs/nfs4proc.c wait->private = waiter;
waiter 7121 fs/nfs/nfs4proc.c struct nfs4_lock_waiter waiter = { .task = current,
waiter 7131 fs/nfs/nfs4proc.c wait.private = &waiter;
waiter 1823 include/linux/blkdev.h static inline void blk_wake_io_task(struct task_struct *waiter)
waiter 1830 include/linux/blkdev.h if (waiter == current)
waiter 1833 include/linux/blkdev.h wake_up_process(waiter);
waiter 1257 include/linux/fs.h static inline int locks_delete_block(struct file_lock *waiter)
waiter 548 include/linux/pagemap.h extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
waiter 30 kernel/locking/mutex-debug.c void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
waiter 32 kernel/locking/mutex-debug.c memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
waiter 33 kernel/locking/mutex-debug.c waiter->magic = waiter;
waiter 34 kernel/locking/mutex-debug.c INIT_LIST_HEAD(&waiter->list);
waiter 37 kernel/locking/mutex-debug.c void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
waiter 41 kernel/locking/mutex-debug.c DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
waiter 42 kernel/locking/mutex-debug.c DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
waiter 45 kernel/locking/mutex-debug.c void debug_mutex_free_waiter(struct mutex_waiter *waiter)
waiter 47 kernel/locking/mutex-debug.c DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list));
waiter 48 kernel/locking/mutex-debug.c memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
waiter 51 kernel/locking/mutex-debug.c void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
waiter 57 kernel/locking/mutex-debug.c task->blocked_on = waiter;
waiter 60 kernel/locking/mutex-debug.c void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
waiter 63 kernel/locking/mutex-debug.c DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
waiter 64 kernel/locking/mutex-debug.c DEBUG_LOCKS_WARN_ON(waiter->task != task);
waiter 65 kernel/locking/mutex-debug.c DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
waiter 68 kernel/locking/mutex-debug.c list_del_init(&waiter->list);
waiter 69 kernel/locking/mutex-debug.c waiter->task = NULL;
waiter 18 kernel/locking/mutex-debug.h struct mutex_waiter *waiter);
waiter 20 kernel/locking/mutex-debug.h struct mutex_waiter *waiter);
waiter 21 kernel/locking/mutex-debug.h extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
waiter 23 kernel/locking/mutex-debug.h struct mutex_waiter *waiter,
waiter 25 kernel/locking/mutex-debug.h extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
waiter 198 kernel/locking/mutex.c static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
waiter 200 kernel/locking/mutex.c return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
waiter 208 kernel/locking/mutex.c __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
waiter 211 kernel/locking/mutex.c debug_mutex_add_waiter(lock, waiter, current);
waiter 213 kernel/locking/mutex.c list_add_tail(&waiter->list, list);
waiter 214 kernel/locking/mutex.c if (__mutex_waiter_is_first(lock, waiter))
waiter 367 kernel/locking/mutex.c __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
waiter 373 kernel/locking/mutex.c if (waiter->ww_ctx->acquired > 0 &&
waiter 374 kernel/locking/mutex.c __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
waiter 375 kernel/locking/mutex.c debug_mutex_wake_waiter(lock, waiter);
waiter 376 kernel/locking/mutex.c wake_up_process(waiter->task);
waiter 503 kernel/locking/mutex.c struct mutex_waiter *waiter)
waiter 530 kernel/locking/mutex.c if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
waiter 537 kernel/locking/mutex.c if (waiter && !__mutex_waiter_is_first(lock, waiter))
waiter 551 kernel/locking/mutex.c struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
waiter 574 kernel/locking/mutex.c if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
waiter 639 kernel/locking/mutex.c const bool use_ww_ctx, struct mutex_waiter *waiter)
waiter 641 kernel/locking/mutex.c if (!waiter) {
waiter 673 kernel/locking/mutex.c if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
waiter 685 kernel/locking/mutex.c if (!waiter)
waiter 692 kernel/locking/mutex.c if (!waiter)
waiter 715 kernel/locking/mutex.c const bool use_ww_ctx, struct mutex_waiter *waiter)
waiter 805 kernel/locking/mutex.c __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
waiter 829 kernel/locking/mutex.c cur = waiter;
waiter 852 kernel/locking/mutex.c __ww_mutex_add_waiter(struct mutex_waiter *waiter,
waiter 861 kernel/locking/mutex.c __mutex_add_waiter(lock, waiter, &lock->wait_list);
waiter 901 kernel/locking/mutex.c __mutex_add_waiter(lock, waiter, pos);
waiter 930 kernel/locking/mutex.c struct mutex_waiter waiter;
waiter 979 kernel/locking/mutex.c debug_mutex_lock_common(lock, &waiter);
waiter 985 kernel/locking/mutex.c __mutex_add_waiter(lock, &waiter, &lock->wait_list);
waiter 989 kernel/locking/mutex.c waiter.ww_ctx = MUTEX_POISON_WW_CTX;
waiter 996 kernel/locking/mutex.c ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
waiter 1000 kernel/locking/mutex.c waiter.ww_ctx = ww_ctx;
waiter 1003 kernel/locking/mutex.c waiter.task = current;
waiter 1027 kernel/locking/mutex.c ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
waiter 1040 kernel/locking/mutex.c first = __mutex_waiter_is_first(lock, &waiter);
waiter 1052 kernel/locking/mutex.c (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
waiter 1067 kernel/locking/mutex.c !__mutex_waiter_is_first(lock, &waiter))
waiter 1071 kernel/locking/mutex.c mutex_remove_waiter(lock, &waiter, current);
waiter 1075 kernel/locking/mutex.c debug_mutex_free_waiter(&waiter);
waiter 1090 kernel/locking/mutex.c mutex_remove_waiter(lock, &waiter, current);
waiter 1093 kernel/locking/mutex.c debug_mutex_free_waiter(&waiter);
waiter 1265 kernel/locking/mutex.c struct mutex_waiter *waiter =
waiter 1269 kernel/locking/mutex.c next = waiter->task;
waiter 1271 kernel/locking/mutex.c debug_mutex_wake_waiter(lock, waiter);
waiter 13 kernel/locking/mutex.h #define mutex_remove_waiter(lock, waiter, task) \
waiter 14 kernel/locking/mutex.h __list_del((waiter)->list.prev, (waiter)->list.next)
waiter 16 kernel/locking/mutex.h #define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
waiter 17 kernel/locking/mutex.h #define debug_mutex_free_waiter(waiter) do { } while (0)
waiter 18 kernel/locking/mutex.h #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
waiter 23 kernel/locking/mutex.h debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
waiter 87 kernel/locking/rtmutex-debug.c void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
waiter 91 kernel/locking/rtmutex-debug.c if (!waiter->deadlock_lock || !debug_locks)
waiter 95 kernel/locking/rtmutex-debug.c task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
waiter 117 kernel/locking/rtmutex-debug.c printk_lock(waiter->lock, 1);
waiter 121 kernel/locking/rtmutex-debug.c printk_lock(waiter->deadlock_lock, 1);
waiter 158 kernel/locking/rtmutex-debug.c void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
waiter 160 kernel/locking/rtmutex-debug.c memset(waiter, 0x11, sizeof(*waiter));
waiter 161 kernel/locking/rtmutex-debug.c waiter->deadlock_task_pid = NULL;
waiter 164 kernel/locking/rtmutex-debug.c void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
waiter 166 kernel/locking/rtmutex-debug.c put_pid(waiter->deadlock_task_pid);
waiter 167 kernel/locking/rtmutex-debug.c memset(waiter, 0x22, sizeof(*waiter));
waiter 13 kernel/locking/rtmutex-debug.h extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
waiter 14 kernel/locking/rtmutex-debug.h extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
waiter 22 kernel/locking/rtmutex-debug.h struct rt_mutex_waiter *waiter,
waiter 24 kernel/locking/rtmutex-debug.h extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
waiter 28 kernel/locking/rtmutex-debug.h static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
waiter 31 kernel/locking/rtmutex-debug.h return (waiter != NULL);
waiter 273 kernel/locking/rtmutex.c rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
waiter 283 kernel/locking/rtmutex.c if (rt_mutex_waiter_less(waiter, entry)) {
waiter 291 kernel/locking/rtmutex.c rb_link_node(&waiter->tree_entry, parent, link);
waiter 292 kernel/locking/rtmutex.c rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost);
waiter 296 kernel/locking/rtmutex.c rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
waiter 298 kernel/locking/rtmutex.c if (RB_EMPTY_NODE(&waiter->tree_entry))
waiter 301 kernel/locking/rtmutex.c rb_erase_cached(&waiter->tree_entry, &lock->waiters);
waiter 302 kernel/locking/rtmutex.c RB_CLEAR_NODE(&waiter->tree_entry);
waiter 306 kernel/locking/rtmutex.c rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
waiter 316 kernel/locking/rtmutex.c if (rt_mutex_waiter_less(waiter, entry)) {
waiter 324 kernel/locking/rtmutex.c rb_link_node(&waiter->pi_tree_entry, parent, link);
waiter 325 kernel/locking/rtmutex.c rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost);
waiter 329 kernel/locking/rtmutex.c rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
waiter 331 kernel/locking/rtmutex.c if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
waiter 334 kernel/locking/rtmutex.c rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
waiter 335 kernel/locking/rtmutex.c RB_CLEAR_NODE(&waiter->pi_tree_entry);
waiter 363 kernel/locking/rtmutex.c static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
waiter 373 kernel/locking/rtmutex.c return debug_rt_mutex_detect_deadlock(waiter, chwalk);
waiter 456 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
waiter 508 kernel/locking/rtmutex.c waiter = task->pi_blocked_on;
waiter 519 kernel/locking/rtmutex.c if (!waiter)
waiter 538 kernel/locking/rtmutex.c if (next_lock != waiter->lock)
waiter 570 kernel/locking/rtmutex.c if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
waiter 580 kernel/locking/rtmutex.c lock = waiter->lock;
waiter 664 kernel/locking/rtmutex.c rt_mutex_dequeue(lock, waiter);
waiter 682 kernel/locking/rtmutex.c waiter->prio = task->prio;
waiter 683 kernel/locking/rtmutex.c waiter->deadline = task->dl.deadline;
waiter 685 kernel/locking/rtmutex.c rt_mutex_enqueue(lock, waiter);
waiter 715 kernel/locking/rtmutex.c if (waiter == rt_mutex_top_waiter(lock)) {
waiter 723 kernel/locking/rtmutex.c rt_mutex_enqueue_pi(task, waiter);
waiter 726 kernel/locking/rtmutex.c } else if (prerequeue_top_waiter == waiter) {
waiter 737 kernel/locking/rtmutex.c rt_mutex_dequeue_pi(task, waiter);
waiter 738 kernel/locking/rtmutex.c waiter = rt_mutex_top_waiter(lock);
waiter 739 kernel/locking/rtmutex.c rt_mutex_enqueue_pi(task, waiter);
waiter 784 kernel/locking/rtmutex.c if (!detect_deadlock && waiter != top_waiter)
waiter 808 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter)
waiter 842 kernel/locking/rtmutex.c if (waiter) {
waiter 847 kernel/locking/rtmutex.c if (waiter != rt_mutex_top_waiter(lock))
waiter 854 kernel/locking/rtmutex.c rt_mutex_dequeue(lock, waiter);
waiter 929 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter,
waiter 934 kernel/locking/rtmutex.c struct rt_mutex_waiter *top_waiter = waiter;
waiter 953 kernel/locking/rtmutex.c waiter->task = task;
waiter 954 kernel/locking/rtmutex.c waiter->lock = lock;
waiter 955 kernel/locking/rtmutex.c waiter->prio = task->prio;
waiter 956 kernel/locking/rtmutex.c waiter->deadline = task->dl.deadline;
waiter 961 kernel/locking/rtmutex.c rt_mutex_enqueue(lock, waiter);
waiter 963 kernel/locking/rtmutex.c task->pi_blocked_on = waiter;
waiter 971 kernel/locking/rtmutex.c if (waiter == rt_mutex_top_waiter(lock)) {
waiter 973 kernel/locking/rtmutex.c rt_mutex_enqueue_pi(owner, waiter);
waiter 978 kernel/locking/rtmutex.c } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
waiter 1004 kernel/locking/rtmutex.c next_lock, waiter, task);
waiter 1020 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter;
waiter 1024 kernel/locking/rtmutex.c waiter = rt_mutex_top_waiter(lock);
waiter 1033 kernel/locking/rtmutex.c rt_mutex_dequeue_pi(current, waiter);
waiter 1057 kernel/locking/rtmutex.c wake_q_add(wake_q, waiter->task);
waiter 1068 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter)
waiter 1070 kernel/locking/rtmutex.c bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
waiter 1077 kernel/locking/rtmutex.c rt_mutex_dequeue(lock, waiter);
waiter 1090 kernel/locking/rtmutex.c rt_mutex_dequeue_pi(owner, waiter);
waiter 1127 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter;
waiter 1133 kernel/locking/rtmutex.c waiter = task->pi_blocked_on;
waiter 1134 kernel/locking/rtmutex.c if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
waiter 1138 kernel/locking/rtmutex.c next_lock = waiter->lock;
waiter 1148 kernel/locking/rtmutex.c void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
waiter 1150 kernel/locking/rtmutex.c debug_rt_mutex_init_waiter(waiter);
waiter 1151 kernel/locking/rtmutex.c RB_CLEAR_NODE(&waiter->pi_tree_entry);
waiter 1152 kernel/locking/rtmutex.c RB_CLEAR_NODE(&waiter->tree_entry);
waiter 1153 kernel/locking/rtmutex.c waiter->task = NULL;
waiter 1169 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter)
waiter 1175 kernel/locking/rtmutex.c if (try_to_take_rt_mutex(lock, current, waiter))
waiter 1194 kernel/locking/rtmutex.c debug_rt_mutex_print_deadlock(waiter);
waiter 1234 kernel/locking/rtmutex.c struct rt_mutex_waiter waiter;
waiter 1238 kernel/locking/rtmutex.c rt_mutex_init_waiter(&waiter);
waiter 1262 kernel/locking/rtmutex.c ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
waiter 1266 kernel/locking/rtmutex.c ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
waiter 1270 kernel/locking/rtmutex.c remove_waiter(lock, &waiter);
waiter 1271 kernel/locking/rtmutex.c rt_mutex_handle_deadlock(ret, chwalk, &waiter);
waiter 1286 kernel/locking/rtmutex.c debug_rt_mutex_free_waiter(&waiter);
waiter 1748 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter,
waiter 1759 kernel/locking/rtmutex.c ret = task_blocks_on_rt_mutex(lock, waiter, task,
waiter 1772 kernel/locking/rtmutex.c debug_rt_mutex_print_deadlock(waiter);
waiter 1797 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter,
waiter 1803 kernel/locking/rtmutex.c ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
waiter 1805 kernel/locking/rtmutex.c remove_waiter(lock, waiter);
waiter 1850 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter)
waiter 1857 kernel/locking/rtmutex.c ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
waiter 1889 kernel/locking/rtmutex.c struct rt_mutex_waiter *waiter)
waiter 1905 kernel/locking/rtmutex.c try_to_take_rt_mutex(lock, current, waiter);
waiter 1911 kernel/locking/rtmutex.c remove_waiter(lock, waiter);
waiter 138 kernel/locking/rtmutex_common.h extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
waiter 140 kernel/locking/rtmutex_common.h struct rt_mutex_waiter *waiter,
waiter 143 kernel/locking/rtmutex_common.h struct rt_mutex_waiter *waiter,
waiter 147 kernel/locking/rtmutex_common.h struct rt_mutex_waiter *waiter);
waiter 149 kernel/locking/rtmutex_common.h struct rt_mutex_waiter *waiter);
waiter 405 kernel/locking/rwsem.c struct rwsem_waiter *waiter, *tmp;
waiter 415 kernel/locking/rwsem.c waiter = rwsem_first_waiter(sem);
waiter 417 kernel/locking/rwsem.c if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
waiter 426 kernel/locking/rwsem.c wake_q_add(wake_q, waiter->task);
waiter 456 kernel/locking/rwsem.c time_after(jiffies, waiter->timeout)) {
waiter 470 kernel/locking/rwsem.c owner = waiter->task;
waiter 471 kernel/locking/rwsem.c if (waiter->last_rowner & RWSEM_RD_NONSPINNABLE) {
waiter 502 kernel/locking/rwsem.c list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
waiter 503 kernel/locking/rwsem.c if (waiter->type == RWSEM_WAITING_FOR_WRITE)
waiter 507 kernel/locking/rwsem.c list_move_tail(&waiter->list, &wlist);
waiter 534 kernel/locking/rwsem.c list_for_each_entry_safe(waiter, tmp, &wlist, list) {
waiter 537 kernel/locking/rwsem.c tsk = waiter->task;
waiter 546 kernel/locking/rwsem.c smp_store_release(&waiter->task, NULL);
waiter 998 kernel/locking/rwsem.c struct rwsem_waiter waiter;
waiter 1006 kernel/locking/rwsem.c waiter.last_rowner = atomic_long_read(&sem->owner);
waiter 1007 kernel/locking/rwsem.c if (!(waiter.last_rowner & RWSEM_READER_OWNED))
waiter 1008 kernel/locking/rwsem.c waiter.last_rowner &= RWSEM_RD_NONSPINNABLE;
waiter 1033 kernel/locking/rwsem.c } else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
waiter 1039 kernel/locking/rwsem.c waiter.task = current;
waiter 1040 kernel/locking/rwsem.c waiter.type = RWSEM_WAITING_FOR_READ;
waiter 1041 kernel/locking/rwsem.c waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
waiter 1062 kernel/locking/rwsem.c list_add_tail(&waiter.list, &sem->wait_list);
waiter 1090 kernel/locking/rwsem.c if (!smp_load_acquire(&waiter.task)) {
waiter 1096 kernel/locking/rwsem.c if (waiter.task)
waiter 1111 kernel/locking/rwsem.c list_del(&waiter.list);
waiter 1144 kernel/locking/rwsem.c struct rwsem_waiter waiter;
waiter 1166 kernel/locking/rwsem.c waiter.task = current;
waiter 1167 kernel/locking/rwsem.c waiter.type = RWSEM_WAITING_FOR_WRITE;
waiter 1168 kernel/locking/rwsem.c waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
waiter 1175 kernel/locking/rwsem.c list_add_tail(&waiter.list, &sem->wait_list);
waiter 1249 kernel/locking/rwsem.c (rwsem_first_waiter(sem) == &waiter))
waiter 1261 kernel/locking/rwsem.c time_after(jiffies, waiter.timeout))) {
waiter 1271 kernel/locking/rwsem.c list_del(&waiter.list);
waiter 1281 kernel/locking/rwsem.c list_del(&waiter.list);
waiter 207 kernel/locking/semaphore.c struct semaphore_waiter waiter;
waiter 209 kernel/locking/semaphore.c list_add_tail(&waiter.list, &sem->wait_list);
waiter 210 kernel/locking/semaphore.c waiter.task = current;
waiter 211 kernel/locking/semaphore.c waiter.up = false;
waiter 222 kernel/locking/semaphore.c if (waiter.up)
waiter 227 kernel/locking/semaphore.c list_del(&waiter.list);
waiter 231 kernel/locking/semaphore.c list_del(&waiter.list);
waiter 257 kernel/locking/semaphore.c struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
waiter 259 kernel/locking/semaphore.c list_del(&waiter->list);
waiter 260 kernel/locking/semaphore.c waiter->up = true;
waiter 261 kernel/locking/semaphore.c wake_up_process(waiter->task);
waiter 1702 kernel/printk/printk.c int waiter;
waiter 1705 kernel/printk/printk.c waiter = READ_ONCE(console_waiter);
waiter 1709 kernel/printk/printk.c if (!waiter) {
waiter 1740 kernel/printk/printk.c bool waiter;
waiter 1751 kernel/printk/printk.c waiter = READ_ONCE(console_waiter);
waiter 1752 kernel/printk/printk.c if (!waiter && owner && owner != current) {
waiter 185 lib/klist.c struct klist_waiter *waiter, *tmp;
waiter 191 lib/klist.c list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
waiter 192 lib/klist.c if (waiter->node != n)
waiter 195 lib/klist.c list_del(&waiter->list);
waiter 196 lib/klist.c waiter->woken = 1;
waiter 198 lib/klist.c wake_up_process(waiter->process);
waiter 240 lib/klist.c struct klist_waiter waiter;
waiter 242 lib/klist.c waiter.node = n;
waiter 243 lib/klist.c waiter.process = current;
waiter 244 lib/klist.c waiter.woken = 0;
waiter 246 lib/klist.c list_add(&waiter.list, &klist_remove_waiters);
waiter 253 lib/klist.c if (waiter.woken)
waiter 1261 mm/filemap.c void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
waiter 1267 mm/filemap.c __add_wait_queue_entry_tail(q, waiter);
waiter 123 mm/page_io.c struct task_struct *waiter = bio->bi_private;
waiter 140 mm/page_io.c if (waiter) {
waiter 141 mm/page_io.c blk_wake_io_task(waiter);
waiter 142 mm/page_io.c put_task_struct(waiter);
waiter 283 tools/testing/selftests/futex/functional/futex_requeue_pi.c pthread_t waiter[THREAD_MAX], waker, blocker;
waiter 320 tools/testing/selftests/futex/functional/futex_requeue_pi.c if (create_rt_thread(&waiter[i], waiterfn, (void *)&args[i],
waiter 339 tools/testing/selftests/futex/functional/futex_requeue_pi.c pthread_join(waiter[i],
waiter 122 tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c pthread_t waiter;
waiter 158 tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c res = create_rt_thread(&waiter, waiterfn, NULL, SCHED_FIFO, 1);
waiter 177 tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c pthread_kill(waiter, SIGUSR1);
waiter 208 tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c pthread_kill(waiter, SIGUSR1);
waiter 210 tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c pthread_join(waiter, NULL);
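
Nearly every hit above instantiates the same on-stack waiter pattern: the slow path builds a small waiter struct on the caller's stack, links it into a wait list under a lock, and sleeps until a flag in the struct flips; the wake side unlinks an entry, sets the flag, and wakes the owning task (compare kernel/locking/semaphore.c lines 207-261 or lib/klist.c lines 240-253 above). The sketch below is a minimal userspace analog of that pattern, assuming only POSIX threads; the names (struct waiter, waiter_block, waiter_wake_first, wait_lock) are illustrative inventions for this sketch, not kernel or libc APIs, and the condition variable stands in for schedule()/wake_up_process().

    #include <pthread.h>
    #include <sched.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative analog of the kernel's on-stack waiter pattern. */
    struct waiter {
            struct waiter *next;  /* singly linked wait list (kernel uses list_head) */
            bool up;              /* set by the waker, like semaphore_waiter.up */
            pthread_cond_t cond;  /* stands in for schedule()/wake_up_process() */
    };

    static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct waiter *wait_list; /* head of the wait list */

    /* Slow path: enqueue an on-stack waiter and sleep until woken. */
    static void waiter_block(void)
    {
            struct waiter w = { .up = false };

            pthread_cond_init(&w.cond, NULL);
            pthread_mutex_lock(&wait_lock);
            w.next = wait_list;            /* kernel code uses list_add_tail() */
            wait_list = &w;
            while (!w.up)                  /* loop guards against spurious wakeups */
                    pthread_cond_wait(&w.cond, &wait_lock);
            pthread_mutex_unlock(&wait_lock);
            pthread_cond_destroy(&w.cond); /* safe: waker is done once we own the lock */
    }

    /* Wake side: unlink one waiter, flip its flag, wake its thread. */
    static bool waiter_wake_first(void)
    {
            struct waiter *w;

            pthread_mutex_lock(&wait_lock);
            w = wait_list;
            if (w) {
                    wait_list = w->next;   /* kernel code uses list_del() */
                    w->up = true;          /* like waiter->up = true in semaphore.c */
                    pthread_cond_signal(&w->cond);
            }
            pthread_mutex_unlock(&wait_lock);
            return w != NULL;
    }

    static void *thread_fn(void *arg)
    {
            (void)arg;
            waiter_block();
            puts("waiter woken");
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, thread_fn, NULL);
            while (!waiter_wake_first())   /* retry until the waiter has enqueued */
                    sched_yield();
            pthread_join(t, NULL);
            return 0;
    }

Keeping the waiter on the blocked task's stack is the design point shared by the semaphore, rwsem, mutex, ldsem, and klist hits: no allocation is needed on the contended path, and the waker may only touch the entry while it is still linked, which is why the kernel sites pair list_del() with a release store or wake before the sleeper can return and reuse that stack memory.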