/linux-4.4.14/Documentation/locking/

  rt-mutex.txt
      31  The enqueueing of the waiters into the rtmutex waiter list is done in
      34  priority waiters list. This list too queues in priority order. Whenever
      42  without waiters. The optimized fastpath operations require cmpxchg
      51  waiters" state.
      60  taskpointer 1 0 mutex is held and has waiters
      61  taskpointer 1 1 task is pending owner and mutex has waiters
      68  the mutex which puts the woken up thread back on the waiters list.
      72  takes/releases locks that have lower-prio waiters. Without this
      76  (*) The "mutex has waiters" bit gets set to take the lock. If the lock
      78  no waiters. So this is a transitional state to synchronize with looking
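
The rt-mutex.txt hits above describe how the owner task pointer and the
"mutex has waiters" state share a single word so the uncontended
lock/unlock paths can be a single cmpxchg. The following is a minimal
user-space sketch of that encoding, not the kernel's rtmutex code: the
toy_* names are invented, and it assumes task structures are at least
2-byte aligned so bit 0 of the pointer is free to act as the flag.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define HAS_WAITERS 1UL            /* bit 0 of the owner word */

    struct task { int prio; };         /* stand-in for the real task struct */

    struct toy_rtmutex {
        atomic_uintptr_t owner;        /* task pointer | HAS_WAITERS */
    };

    /* Fastpath acquire: only succeeds when the lock is free and has no waiters. */
    static bool toy_trylock(struct toy_rtmutex *lock, struct task *me)
    {
        uintptr_t expected = 0;        /* NULL owner, waiters bit clear */

        return atomic_compare_exchange_strong(&lock->owner, &expected,
                                              (uintptr_t)me);
    }

    /* Fastpath release: fails once a blocked task has set the waiters bit,
     * forcing the unlocker into a slowpath that hands the lock on. */
    static bool toy_tryunlock(struct toy_rtmutex *lock, struct task *me)
    {
        uintptr_t expected = (uintptr_t)me;

        return atomic_compare_exchange_strong(&lock->owner, &expected, 0);
    }

    /* Called by a task about to block: mark the lock as contended. */
    static void toy_mark_waiters(struct toy_rtmutex *lock)
    {
        atomic_fetch_or(&lock->owner, HAS_WAITERS);
    }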

  rt-mutex-design.txt
     107  waiters - A list of processes that are blocked on a mutex.
     207  Every mutex keeps track of all the waiters that are blocked on itself. The mutex
     208  has a plist to store these waiters by priority. This list is protected by
     218  a list of all top waiters of the mutexes that are owned by the process.
     219  Note that this list only holds the top waiters and not all waiters that are
     322  in more detail, but is set whenever there are waiters on a mutex.
     375  waiters of all the mutexes that the task owns, rt_mutex_getprio simply needs
     585  be false, because if the mutex has no owner, there are no waiters and
     586  the current task also won't have any waiters. But we don't have the lock
     615  means that if the mutex doesn't have any waiters, there's no accounting needed
    [all …]
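
The design-document hits note that each task tracks only the top waiter
of every mutex it owns, which is all rt_mutex_getprio needs to compute
the boosted priority. A rough sketch of that computation, assuming the
kernel convention that a lower number means higher priority; the toy_*
structures are invented for illustration and stand in for the task's
real pi-waiter bookkeeping.

    /* Effective priority = the better of the task's own priority and the
     * priority of the highest-priority waiter blocked on any lock it owns. */

    struct toy_waiter { int prio; };

    struct toy_task {
        int normal_prio;                   /* priority without boosting */
        struct toy_waiter *top_pi_waiter;  /* best waiter across all owned locks,
                                              NULL when nothing is blocked on us */
    };

    static int toy_getprio(const struct toy_task *task)
    {
        if (!task->top_pi_waiter)
            return task->normal_prio;

        return task->top_pi_waiter->prio < task->normal_prio ?
               task->top_pi_waiter->prio : task->normal_prio;
    }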

  mutex-design.txt
      29  0: locked, no waiters
      30  negative: locked, with potential waiters
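
These two mutex-design.txt lines describe a lock counter whose sign
encodes contention (with 1 meaning unlocked). Below is a deliberately
simplified sketch of that protocol using C11 atomics; it is not the
kernel's mutex implementation, it omits the spinlock and wait list a
real mutex needs, and the toy_* names are invented.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Counter protocol:
     *   1         unlocked
     *   0         locked, no waiters
     *   negative  locked, with potential waiters
     */
    struct toy_mutex {
        atomic_int count;
    };

    /* Uncontended acquire: 1 -> 0. */
    static bool toy_lock_fastpath(struct toy_mutex *m)
    {
        int expected = 1;

        return atomic_compare_exchange_strong(&m->count, &expected, 0);
    }

    /* A task about to sleep drives the count negative, so the owner's
     * unlock fastpath (0 -> 1) fails and it falls back to a slowpath
     * that wakes a waiter. */
    static void toy_lock_mark_contended(struct toy_mutex *m)
    {
        atomic_fetch_sub(&m->count, 1);
    }

    static bool toy_unlock_fastpath(struct toy_mutex *m)
    {
        int expected = 0;

        return atomic_compare_exchange_strong(&m->count, &expected, 1);
    }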

/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/

  amdgpu_semaphore.c
      50  (*semaphore)->waiters = 0; in amdgpu_semaphore_create()
      64  --semaphore->waiters; in amdgpu_semaphore_emit_signal()
      79  ++semaphore->waiters; in amdgpu_semaphore_emit_wait()
      95  if ((*semaphore)->waiters > 0) { in amdgpu_semaphore_free()

  amdgpu_trace.h
     258  __field(signed, waiters)
     264  __entry->waiters = sem->waiters;
     269  __entry->waiters, __entry->gpu_addr)

  amdgpu.h
     644  signed waiters; member

/linux-4.4.14/drivers/gpu/drm/radeon/

  radeon_semaphore.c
      50  (*semaphore)->waiters = 0; in radeon_semaphore_create()
      66  --semaphore->waiters; in radeon_semaphore_emit_signal()
      83  ++semaphore->waiters; in radeon_semaphore_emit_wait()
      99  if ((*semaphore)->waiters > 0) { in radeon_semaphore_free()

  radeon_trace.h
     175  __field(signed, waiters)
     181  __entry->waiters = sem->waiters;
     186  __entry->waiters, __entry->gpu_addr)

  radeon.h
     591  signed waiters; member

/linux-4.4.14/drivers/gpu/drm/amd/amdkfd/

  kfd_events.c
      44  struct list_head waiters; member
     358  list_del(&ev->waiters); in destroy_event()
     422  INIT_LIST_HEAD(&ev->waiters); in kfd_event_create()
     483  ev->signaled = !ev->auto_reset || list_empty(&ev->waiters); in set_event()
     485  list_for_each_entry_safe(waiter, next, &ev->waiters, waiters) { in set_event()
     489  list_del_init(&waiter->waiters); in set_event()
     612  INIT_LIST_HEAD(&event_waiters[i].waiters); in alloc_event_waiters()
     635  list_add(&waiter->waiters, &ev->waiters); in init_event_waiter()
     708  static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters) in free_waiters() argument
     713  list_del(&waiters[i].waiters); in free_waiters()
    [all …]

  kfd_events.h
      59  struct list_head waiters; /* List of kfd_event_waiter by waiters. */ member

/linux-4.4.14/drivers/md/persistent-data/

  dm-block-manager.c
      40  struct list_head waiters; member
     160  list_for_each_entry_safe(w, tmp, &lock->waiters, list) { in __wake_many()
     186  INIT_LIST_HEAD(&lock->waiters); in bl_init()
     195  list_empty(&lock->waiters); in __available_for_read()
     221  list_add_tail(&w.list, &lock->waiters); in bl_down_read()
     256  if (!list_empty(&lock->waiters)) in bl_up_read()
     273  if (lock->count == 0 && list_empty(&lock->waiters)) { in bl_down_write()
     288  list_add(&w.list, &lock->waiters); in bl_down_write()
     302  if (!list_empty(&lock->waiters)) in bl_up_write()

/linux-4.4.14/include/linux/

  rtmutex.h
      31  struct rb_root waiters; member
      71  , .waiters = RB_ROOT \

/linux-4.4.14/Documentation/

  futex-requeue-pi.txt
       6  left without an owner if it has waiters; doing so would break the PI
      47  has waiters. Note that pthread_cond_wait() attempts to lock the
      49  underlying rt_mutex with waiters, and no owner, breaking the
      88  In order to ensure the rt_mutex has an owner if it has waiters, it

  robust-futexes.txt
      20  sys_futex(FUTEX_WAKE) syscall to wake them up. Once all waiters have
      29  pthread_mutex_t, or yum is kill -9-ed), then waiters for that lock need

  pi-futex.txt
     110  pi_state->rt_mutex and thus wakes up any potential waiters.

  robust-futex-ABI.txt
     163  In the above, bit 31 was set by futex waiters on that lock to indicate

  memory-barriers.txt
    2234  struct list_head waiters;

/linux-4.4.14/Documentation/scheduler/

  completion.txt
     203  achieved calls complete() to signal exactly one of the waiters that it can
     208  or calls complete_all() to signal all current and future waiters.
     218  of waiters to continue - each call to complete() will simply increment the
     242  completions that were not yet consumed by waiters (implying that there are
     243  waiters) and true otherwise;
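
The completion.txt hits summarize the semantics of complete() (wake
exactly one waiter) versus complete_all() (wake all current and future
waiters). Below is a small kernel-style usage sketch built on those
hits; the functions and the setup_done completion are invented for
illustration, but complete(), complete_all() and wait_for_completion()
are the documented API.

    #include <linux/completion.h>

    static DECLARE_COMPLETION(setup_done);

    /* producer side, e.g. a worker thread finishing initialisation */
    static void setup_finished(void)
    {
        complete(&setup_done);       /* lets exactly one waiter proceed */
    }

    static void setup_finished_for_all(void)
    {
        complete_all(&setup_done);   /* lets all current and future waiters proceed */
    }

    /* consumer side: blocks until one of the calls above has happened */
    static void wait_for_setup(void)
    {
        wait_for_completion(&setup_done);
    }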

/linux-4.4.14/kernel/locking/

  rtmutex_common.h
      43  return !RB_EMPTY_ROOT(&lock->waiters); in rt_mutex_has_waiters()

  rtmutex.c
     182  struct rb_node **link = &lock->waiters.rb_node; in rt_mutex_enqueue()
     202  rb_insert_color(&waiter->tree_entry, &lock->waiters); in rt_mutex_enqueue()
     214  rb_erase(&waiter->tree_entry, &lock->waiters); in rt_mutex_dequeue()
    1538  lock->waiters = RB_ROOT; in __rt_mutex_init()
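
The rtmutex.c hits show the waiters being kept in an rb-tree rooted at
lock->waiters. The fragment below is a stripped-down, kernel-style
sketch of that enqueue/dequeue pattern, ordered by priority (lower
value first); the toy_* names are invented, and the locking, leftmost
caching and deadline handling of the real code are omitted.

    #include <linux/rbtree.h>

    struct toy_waiter {
        struct rb_node node;
        int prio;                    /* lower value == higher priority */
    };

    struct toy_lock {
        struct rb_root waiters;      /* initialised to RB_ROOT */
    };

    static void toy_enqueue_waiter(struct toy_lock *lock, struct toy_waiter *waiter)
    {
        struct rb_node **link = &lock->waiters.rb_node;
        struct rb_node *parent = NULL;

        /* walk down to the insertion point, keeping the tree priority-ordered */
        while (*link) {
            struct toy_waiter *entry;

            parent = *link;
            entry = rb_entry(parent, struct toy_waiter, node);
            if (waiter->prio < entry->prio)
                link = &parent->rb_left;
            else
                link = &parent->rb_right;
        }

        rb_link_node(&waiter->node, parent, link);
        rb_insert_color(&waiter->node, &lock->waiters);
    }

    static void toy_dequeue_waiter(struct toy_lock *lock, struct toy_waiter *waiter)
    {
        rb_erase(&waiter->node, &lock->waiters);
    }

    static bool toy_has_waiters(struct toy_lock *lock)
    {
        return !RB_EMPTY_ROOT(&lock->waiters);
    }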

/linux-4.4.14/drivers/gpu/drm/omapdrm/

  omap_gem.c
    1038  static LIST_HEAD(waiters);
    1063  list_for_each_entry_safe(waiter, n, &waiters, list) { in sync_op_update()
    1164  list_add_tail(&waiter->list, &waiters); in omap_gem_op_sync()
    1215  list_add_tail(&waiter->list, &waiters); in omap_gem_op_async()

/linux-4.4.14/fs/xfs/

  xfs_log_priv.h
     349  struct list_head waiters; member

  xfs_log.c
     198  INIT_LIST_HEAD(&head->waiters); in xlog_grant_head_init()
     209  list_for_each_entry(tic, &head->waiters, t_queue) in xlog_grant_head_wake_all()
     240  list_for_each_entry(tic, &head->waiters, t_queue) { in xlog_grant_head_wake()
     261  list_add_tail(&tic->t_queue, &head->waiters); in xlog_grant_head_wait()
     326  if (!list_empty_careful(&head->waiters)) { in xlog_grant_head_check()
    1006  if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
    1015  if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()

  xfs_trace.h
     971  __entry->reserveq = list_empty(&log->l_reserve_head.waiters);
     972  __entry->writeq = list_empty(&log->l_write_head.waiters);

/linux-4.4.14/kernel/

  futex.c
     253  atomic_t waiters; member
     348  atomic_inc(&hb->waiters); in hb_waiters_inc()
     363  atomic_dec(&hb->waiters); in hb_waiters_dec()
     370  return atomic_read(&hb->waiters); in hb_waiters_pending()
    3195  atomic_set(&futex_queues[i].waiters, 0); in futex_init()
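
The futex.c hits show a per-hash-bucket atomic waiter count that lets
the wake path skip buckets nobody is sleeping in. A kernel-style sketch
of the same bookkeeping follows, with invented toy_* names; the real
code adds memory-ordering barriers and a priority list of waiters that
are left out here.

    #include <linux/atomic.h>
    #include <linux/spinlock.h>

    struct toy_hash_bucket {
        atomic_t waiters;            /* tasks queued (or about to queue) here */
        spinlock_t lock;
        /* ... list of queued waiters ... */
    };

    /* Waiter side: advertise ourselves before taking the bucket lock, so a
     * concurrent waker cannot observe an "empty" bucket and skip the wakeup. */
    static void toy_queue_me(struct toy_hash_bucket *hb)
    {
        atomic_inc(&hb->waiters);
        spin_lock(&hb->lock);
        /* ... add to the bucket's wait list ... */
        spin_unlock(&hb->lock);
    }

    static void toy_unqueue_me(struct toy_hash_bucket *hb)
    {
        spin_lock(&hb->lock);
        /* ... remove from the bucket's wait list ... */
        spin_unlock(&hb->lock);
        atomic_dec(&hb->waiters);
    }

    /* Waker side: cheap early-out when nobody can possibly be waiting. */
    static void toy_wake(struct toy_hash_bucket *hb)
    {
        if (!atomic_read(&hb->waiters))
            return;

        spin_lock(&hb->lock);
        /* ... wake matching waiters ... */
        spin_unlock(&hb->lock);
    }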

/linux-4.4.14/kernel/trace/

  ring_buffer.c
     395  wait_queue_head_t waiters; member
     509  wake_up_all(&rbwork->waiters); in rb_wake_up_waiters()
     554  prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
     611  finish_wait(&work->waiters, &wait); in ring_buffer_wait()
     646  poll_wait(filp, &work->waiters, poll_table); in ring_buffer_poll_wait()
    1225  init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
    1324  init_waitqueue_head(&buffer->irq_work.waiters); in __ring_buffer_alloc()
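
The ring_buffer.c hits use the classic wait-queue pattern:
prepare_to_wait()/schedule()/finish_wait() on the sleeping side and
wake_up_all() on the producer side. A condensed, kernel-style sketch of
that pattern follows; the toy_work structure and data_ready condition
are invented, and a real implementation would also worry about how the
condition itself is published.

    #include <linux/wait.h>
    #include <linux/sched.h>

    struct toy_work {
        wait_queue_head_t waiters;
        bool data_ready;
    };

    static void toy_init(struct toy_work *work)
    {
        init_waitqueue_head(&work->waiters);
        work->data_ready = false;
    }

    /* Sleeper: loop, because a wakeup does not guarantee the condition holds. */
    static void toy_wait_for_data(struct toy_work *work)
    {
        DEFINE_WAIT(wait);

        for (;;) {
            prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
            if (work->data_ready || signal_pending(current))
                break;
            schedule();
        }
        finish_wait(&work->waiters, &wait);
    }

    /* Producer: publish the condition, then wake everyone sleeping on it. */
    static void toy_data_arrived(struct toy_work *work)
    {
        work->data_ready = true;
        wake_up_all(&work->waiters);
    }

In practice the wait_event_interruptible() family wraps this same
open-coded loop.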

/linux-4.4.14/Documentation/ioctl/

  botching-up-ioctls.txt
     118  waiters.

/linux-4.4.14/Documentation/scsi/

  scsi_eh.txt
     185  3. Wakes up waiters on shost->host_wait. This occurs if someone