/linux-4.1.27/Documentation/locking/ |
D | rt-mutex.txt |
      31  The enqueueing of the waiters into the rtmutex waiter list is done in
      34  priority waiters list. This list too queues in priority order. Whenever
      42  without waiters. The optimized fastpath operations require cmpxchg
      51  waiters" state.
      60  taskpointer 1 0 mutex is held and has waiters
      61  taskpointer 1 1 task is pending owner and mutex has waiters
      68  the mutex which puts the woken up thread back on the waiters list.
      72  takes/releases locks that have lower-prio waiters. Without this
      76  (*) The "mutex has waiters" bit gets set to take the lock. If the lock
      78  no waiters. So this is a transitional state to synchronize with looking
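The quoted lines describe how the rt-mutex fastpath folds a "mutex has waiters" flag into the owner word and relies on cmpxchg for lock and unlock. Below is a userspace sketch of that encoding using C11 atomics; struct fake_rtmutex, fast_lock() and fast_unlock() are invented names, and the whole thing is an analogy, not the kernel implementation.

    /* Owner word sketch: a task pointer whose lowest bit means "has waiters".
     * While that bit is set, the cmpxchg fastpaths fail and a slowpath
     * (waiter list handling, PI boosting) would have to run. */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HAS_WAITERS 1UL

    struct fake_rtmutex {
        _Atomic uintptr_t owner;        /* task pointer | HAS_WAITERS */
    };

    /* Fastpath acquire: only succeeds if the lock is completely free. */
    static int fast_lock(struct fake_rtmutex *m, uintptr_t me)
    {
        uintptr_t expected = 0;
        return atomic_compare_exchange_strong(&m->owner, &expected, me);
    }

    /* Fastpath release: only succeeds if the waiters bit is clear. */
    static int fast_unlock(struct fake_rtmutex *m, uintptr_t me)
    {
        uintptr_t expected = me;
        return atomic_compare_exchange_strong(&m->owner, &expected, 0);
    }

    int main(void)
    {
        struct fake_rtmutex m = { .owner = 0 };
        uintptr_t me = 0x1000;          /* stands in for a task pointer */

        printf("fast lock:   %d\n", fast_lock(&m, me));
        atomic_fetch_or(&m.owner, HAS_WAITERS); /* a contender queued up */
        printf("fast unlock: %d (waiters bit set, slowpath needed)\n",
               fast_unlock(&m, me));
        return 0;
    }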
|
D | rt-mutex-design.txt |
     107  waiters - A list of processes that are blocked on a mutex.
     207  Every mutex keeps track of all the waiters that are blocked on itself. The mutex
     208  has a plist to store these waiters by priority. This list is protected by
     218  a list of all top waiters of the mutexes that are owned by the process.
     219  Note that this list only holds the top waiters and not all waiters that are
     322  in more detail, but is set whenever there are waiters on a mutex.
     375  waiters of all the mutexes that the task owns, rt_mutex_getprio simply needs
     585  be false, because if the mutex has no owner, there are no waiters and
     586  the current task also won't have any waiters. But we don't have the lock
     615  means that if the mutex doesn't have any waiters, there's no accounting needed
          [all …]
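Lines 218-219 and 375 above describe the bookkeeping that makes priority inheritance cheap: a task records only the top waiter of each mutex it owns, so its boosted priority is the better of its normal priority and the head of that sorted list. A minimal sketch of that comparison, with invented struct task / struct waiter types in place of the kernel's task_struct and its pi_waiters structure:

    #include <stdio.h>

    struct waiter { int prio; struct waiter *next; };

    struct task {
        int normal_prio;            /* lower value = higher priority     */
        struct waiter *pi_waiters;  /* top waiters only, best prio first */
    };

    /* rt_mutex_getprio()-style check: inherit the top waiter's priority
     * when it is better than the task's own. */
    static int effective_prio(const struct task *t)
    {
        if (t->pi_waiters && t->pi_waiters->prio < t->normal_prio)
            return t->pi_waiters->prio;
        return t->normal_prio;
    }

    int main(void)
    {
        struct waiter top = { .prio = 10, .next = NULL };
        struct task owner = { .normal_prio = 120, .pi_waiters = &top };

        printf("boosted priority: %d\n", effective_prio(&owner));
        return 0;
    }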
|
D | mutex-design.txt |
      29  0: locked, no waiters
      30  negative: locked, with potential waiters
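The two quoted lines give the counter convention used by the generic mutex: 1 unlocked, 0 locked without waiters, negative locked with possible waiters. The sketch below shows how a fastpath can test that convention with a single atomic operation; it is a userspace toy built on <stdatomic.h>, not kernel/locking/mutex.c.

    #include <stdatomic.h>
    #include <stdio.h>

    /* 1 = unlocked, 0 = locked with no waiters, negative = locked with
     * possible waiters, exactly the convention quoted above. */
    static _Atomic int count = 1;

    static int fastpath_lock(void)
    {
        /* old value 1 means we took an uncontended lock */
        return atomic_fetch_sub(&count, 1) == 1;
    }

    static int fastpath_unlock(void)
    {
        /* old value 0 means nobody queued while we held it */
        return atomic_fetch_add(&count, 1) == 0;
    }

    int main(void)
    {
        printf("first locker:  %d\n", fastpath_lock());
        printf("second locker: %d (count now negative, slowpath)\n",
               fastpath_lock());
        atomic_fetch_add(&count, 1);    /* pretend the contender gave up */
        printf("unlock:        %d\n", fastpath_unlock());
        return 0;
    }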
|
/linux-4.1.27/drivers/gpu/drm/radeon/ |
D | radeon_semaphore.c |
      50  (*semaphore)->waiters = 0;  in radeon_semaphore_create()
      66  --semaphore->waiters;  in radeon_semaphore_emit_signal()
      83  ++semaphore->waiters;  in radeon_semaphore_emit_wait()
      99  if ((*semaphore)->waiters > 0) {  in radeon_semaphore_free()
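The radeon semaphore keeps a signed waiters counter: emitting a wait increments it, emitting a signal decrements it, and a positive value at free time means a queued wait was never signalled. A tiny standalone sketch of that bookkeeping, with invented fake_* names:

    #include <stdio.h>

    struct fake_semaphore { signed waiters; };

    static void emit_wait(struct fake_semaphore *s)   { ++s->waiters; }
    static void emit_signal(struct fake_semaphore *s) { --s->waiters; }

    static void fake_free(struct fake_semaphore *s)
    {
        if (s->waiters > 0)
            printf("warning: freeing semaphore with unsignaled waiters\n");
    }

    int main(void)
    {
        struct fake_semaphore s = { .waiters = 0 };

        emit_wait(&s);      /* a ring queued a wait command  */
        fake_free(&s);      /* no matching signal was queued */
        return 0;
    }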
|
D | radeon_trace.h |
     175  __field(signed, waiters)
     181  __entry->waiters = sem->waiters;
     186  __entry->waiters, __entry->gpu_addr)
|
D | radeon.h | 592 signed waiters; member
|
/linux-4.1.27/drivers/md/persistent-data/ |
D | dm-block-manager.c |
      40  struct list_head waiters;  member
     160  list_for_each_entry_safe(w, tmp, &lock->waiters, list) {  in __wake_many()
     186  INIT_LIST_HEAD(&lock->waiters);  in bl_init()
     195  list_empty(&lock->waiters);  in __available_for_read()
     221  list_add_tail(&w.list, &lock->waiters);  in bl_down_read()
     256  if (!list_empty(&lock->waiters))  in bl_up_read()
     273  if (lock->count == 0 && list_empty(&lock->waiters)) {  in bl_down_write()
     288  list_add(&w.list, &lock->waiters);  in bl_down_write()
     302  if (!list_empty(&lock->waiters))  in bl_up_write()
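dm-block-manager keeps blocked lock requests on an intrusive waiters list and walks it on unlock (__wake_many above). The sketch below reproduces the idea with a hand-rolled singly linked list instead of the kernel's list_head API; struct block_lock, wake_many() and the count convention are simplifications for illustration only.

    #include <stdio.h>

    struct waiter {
        const char *task;
        int want_write;
        struct waiter *next;
    };

    struct block_lock {
        int count;              /* <0 write-locked, >0 readers, 0 free */
        struct waiter *waiters; /* FIFO of blocked lock requests       */
    };

    /* Wake every waiter at the head of the queue that is now compatible:
     * either one writer, or a run of readers. */
    static void wake_many(struct block_lock *lock)
    {
        while (lock->waiters) {
            struct waiter *w = lock->waiters;

            if (w->want_write && lock->count != 0)
                break;          /* writer needs exclusivity, keep waiting */
            lock->waiters = w->next;
            lock->count += w->want_write ? -1 : 1;
            printf("woke %s\n", w->task);
            if (w->want_write)
                break;          /* writer holds the lock alone            */
        }
    }

    int main(void)
    {
        struct waiter w2 = { "reader-B", 0, NULL };
        struct waiter w1 = { "writer-A", 1, &w2 };
        struct block_lock lock = { .count = 0, .waiters = &w1 };

        wake_many(&lock);   /* wakes writer-A only     */
        lock.count = 0;     /* writer-A has released   */
        wake_many(&lock);   /* now wakes reader-B      */
        return 0;
    }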
|
/linux-4.1.27/include/linux/ |
D | rtmutex.h |
      31  struct rb_root waiters;  member
      71  , .waiters = RB_ROOT \
|
/linux-4.1.27/Documentation/ |
D | futex-requeue-pi.txt |
       6  left without an owner if it has waiters; doing so would break the PI
      47  has waiters. Note that pthread_cond_wait() attempts to lock the
      49  underlying rt_mutex with waiters, and no owner, breaking the
      88  In order to ensure the rt_mutex has an owner if it has waiters, it
|
D | robust-futexes.txt |
      20  sys_futex(FUTEX_WAKE) syscall to wake them up. Once all waiters have
      29  pthread_mutex_t, or yum is kill -9-ed), then waiters for that lock need
|
D | pi-futex.txt | 110 pi_state->rt_mutex and thus wakes up any potential waiters.
|
D | robust-futex-ABI.txt | 163 In the above, bit 31 was set by futex waiters on that lock to indicate
|
D | memory-barriers.txt | 2279 struct list_head waiters;
|
/linux-4.1.27/Documentation/scheduler/ |
D | completion.txt |
     203  achieved calls complete() to signal exactly one of the waiters that it can
     208  or calls complete_all() to signal all current and future waiters.
     218  of waiters to continue - each call to complete() will simply increment the
     242  completions that were not yet consumed by waiters (implying that there are
     243  waiters) and true otherwise;
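completion.txt describes complete() as handing out one wake-up token per call (it increments an internal done count) and complete_all() as releasing all current and future waiters. Below is a single-threaded sketch of just that counting behaviour, using invented fake_* helpers rather than <linux/completion.h>; a real completion pairs the counter with a waitqueue.

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_completion { unsigned int done; };

    static void fake_complete(struct fake_completion *x)     { x->done++; }
    static void fake_complete_all(struct fake_completion *x) { x->done = UINT_MAX; }

    /* try_wait_for_completion() analogue: consume one token if any. */
    static bool fake_try_wait(struct fake_completion *x)
    {
        if (!x->done)
            return false;
        if (x->done != UINT_MAX)    /* complete_all() marker never drains */
            x->done--;
        return true;
    }

    int main(void)
    {
        struct fake_completion c = { 0 };

        fake_complete(&c);
        fake_complete(&c);
        printf("%d\n", fake_try_wait(&c));  /* 1: first token  */
        printf("%d\n", fake_try_wait(&c));  /* 1: second token */
        printf("%d\n", fake_try_wait(&c));  /* 0: none left    */

        fake_complete_all(&c);
        printf("%d\n", fake_try_wait(&c));  /* 1: all future waiters pass */
        return 0;
    }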
|
/linux-4.1.27/kernel/locking/ |
D | rtmutex_common.h | 65 return !RB_EMPTY_ROOT(&lock->waiters); in rt_mutex_has_waiters()
|
D | rtmutex.c |
     169  struct rb_node **link = &lock->waiters.rb_node;  in rt_mutex_enqueue()
     189  rb_insert_color(&waiter->tree_entry, &lock->waiters);  in rt_mutex_enqueue()
     201  rb_erase(&waiter->tree_entry, &lock->waiters);  in rt_mutex_dequeue()
    1496  lock->waiters = RB_ROOT;  in __rt_mutex_init()
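rt_mutex_enqueue() above walks the rb-tree rooted at lock->waiters, links the new waiter by priority and rebalances with rb_insert_color(); rt_mutex_has_waiters() (rtmutex_common.h above) is just an RB_EMPTY_ROOT check. The sketch below shows the same walk on a plain, unbalanced binary tree with invented names; the rebalancing step is left out.

    #include <stddef.h>
    #include <stdio.h>

    struct waiter_node {
        int prio;                   /* lower value = higher priority */
        struct waiter_node *left, *right;
    };

    static void waiter_enqueue(struct waiter_node **root, struct waiter_node *w)
    {
        struct waiter_node **link = root;   /* mirrors &lock->waiters.rb_node */

        while (*link) {
            if (w->prio < (*link)->prio)
                link = &(*link)->left;
            else
                link = &(*link)->right;
        }
        *link = w;  /* the kernel would now call rb_insert_color() */
    }

    /* The leftmost node is the highest-priority (top) waiter. */
    static struct waiter_node *waiter_top(struct waiter_node *root)
    {
        while (root && root->left)
            root = root->left;
        return root;
    }

    int main(void)
    {
        struct waiter_node *root = NULL;
        struct waiter_node a = { 30, NULL, NULL }, b = { 10, NULL, NULL };

        waiter_enqueue(&root, &a);
        waiter_enqueue(&root, &b);
        printf("top waiter prio: %d\n", waiter_top(root)->prio);    /* 10 */
        return 0;
    }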
|
/linux-4.1.27/drivers/gpu/drm/omapdrm/ |
D | omap_gem.c |
    1040  static LIST_HEAD(waiters);
    1065  list_for_each_entry_safe(waiter, n, &waiters, list) {  in sync_op_update()
    1166  list_add_tail(&waiter->list, &waiters);  in omap_gem_op_sync()
    1217  list_add_tail(&waiter->list, &waiters);  in omap_gem_op_async()
|
/linux-4.1.27/fs/xfs/ |
D | xfs_log_priv.h | 349 struct list_head waiters; member
|
D | xfs_log.c |
     198  INIT_LIST_HEAD(&head->waiters);  in xlog_grant_head_init()
     209  list_for_each_entry(tic, &head->waiters, t_queue)  in xlog_grant_head_wake_all()
     240  list_for_each_entry(tic, &head->waiters, t_queue) {  in xlog_grant_head_wake()
     261  list_add_tail(&tic->t_queue, &head->waiters);  in xlog_grant_head_wait()
     326  if (!list_empty_careful(&head->waiters)) {  in xlog_grant_head_check()
     991  if (!list_empty_careful(&log->l_write_head.waiters)) {  in xfs_log_space_wake()
    1000  if (!list_empty_careful(&log->l_reserve_head.waiters)) {  in xfs_log_space_wake()
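xfs_log_space_wake() above peeks at the grant-head waiters list with list_empty_careful() so the common no-waiters case never takes the lock. Below is a userspace sketch of that check-then-lock pattern using pthreads (compile with -pthread); struct grant_head and its helpers are invented, and as in the kernel the unlocked peek is racy, so a real waiter must re-check its condition after queueing.

    #include <pthread.h>
    #include <stdio.h>

    struct ticket { struct ticket *next; const char *name; };

    struct grant_head {
        pthread_mutex_t lock;   /* stands in for the grant head spinlock */
        struct ticket *waiters; /* NULL when nobody is queued            */
    };

    static void grant_head_wake_all(struct grant_head *head)
    {
        /* Cheap unlocked peek, like list_empty_careful(): the common case
         * has no waiters and skips the lock entirely. */
        if (!head->waiters)
            return;

        pthread_mutex_lock(&head->lock);
        for (struct ticket *t = head->waiters; t; t = t->next)
            printf("waking %s\n", t->name); /* kernel: wake_up_process() */
        head->waiters = NULL;
        pthread_mutex_unlock(&head->lock);
    }

    int main(void)
    {
        struct ticket t = { NULL, "reserve ticket" };
        struct grant_head head = { PTHREAD_MUTEX_INITIALIZER, &t };

        grant_head_wake_all(&head); /* slow path: lock, walk, wake    */
        grant_head_wake_all(&head); /* fast path: list empty, no lock */
        return 0;
    }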
|
D | xfs_trace.h |
     922  __entry->reserveq = list_empty(&log->l_reserve_head.waiters);
     923  __entry->writeq = list_empty(&log->l_write_head.waiters);
|
/linux-4.1.27/kernel/ |
D | futex.c |
     252  atomic_t waiters;  member
     278  atomic_inc(&hb->waiters);  in hb_waiters_inc()
     293  atomic_dec(&hb->waiters);  in hb_waiters_dec()
     300  return atomic_read(&hb->waiters);  in hb_waiters_pending()
    3070  atomic_set(&futex_queues[i].waiters, 0);  in futex_init()
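futex.c above keeps a per-hash-bucket atomic_t waiters count so that hb_waiters_pending() lets a waker skip the bucket lock when nothing is queued. The sketch below reuses the helper names from the snippet but with a simplified, invented struct hash_bucket and C11 atomics; it is not the futex code itself.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct hash_bucket {
        _Atomic int waiters;
        /* a real bucket also has a spinlock and a list of queued tasks */
    };

    static void hb_waiters_inc(struct hash_bucket *hb) { atomic_fetch_add(&hb->waiters, 1); }
    static void hb_waiters_dec(struct hash_bucket *hb) { atomic_fetch_sub(&hb->waiters, 1); }

    static bool hb_waiters_pending(struct hash_bucket *hb)
    {
        return atomic_load(&hb->waiters) != 0;
    }

    int main(void)
    {
        struct hash_bucket hb = { .waiters = 0 };

        if (!hb_waiters_pending(&hb))
            printf("waker: nothing queued, skip the bucket lock\n");

        hb_waiters_inc(&hb);            /* a waiter is about to queue    */
        if (hb_waiters_pending(&hb))
            printf("waker: must take the lock and scan the queue\n");
        hb_waiters_dec(&hb);            /* waiter was woken and dequeued */
        return 0;
    }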
|
/linux-4.1.27/kernel/trace/ |
D | ring_buffer.c |
     447  wait_queue_head_t waiters;  member
     533  wake_up_all(&rbwork->waiters);  in rb_wake_up_waiters()
     578  prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);  in ring_buffer_wait()
     635  finish_wait(&work->waiters, &wait);  in ring_buffer_wait()
     670  poll_wait(filp, &work->waiters, poll_table);  in ring_buffer_poll_wait()
    1249  init_waitqueue_head(&cpu_buffer->irq_work.waiters);  in rb_allocate_cpu_buffer()
    1348  init_waitqueue_head(&buffer->irq_work.waiters);  in __ring_buffer_alloc()
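ring_buffer.c above parks readers on a wait_queue_head_t and wakes them all from rb_wake_up_waiters(). The pthread program below is a userspace analogue of that pattern, with a condition variable standing in for the waitqueue and a broadcast for wake_up_all(); compile with -pthread. Nothing in it comes from the ring-buffer implementation.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t waiters = PTHREAD_COND_INITIALIZER;
    static int data_ready;

    static void *reader(void *arg)
    {
        pthread_mutex_lock(&lock);
        while (!data_ready)                 /* prepare_to_wait()/schedule() */
            pthread_cond_wait(&waiters, &lock);
        pthread_mutex_unlock(&lock);
        printf("reader %ld woke up\n", (long)arg);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[2];

        for (long i = 0; i < 2; i++)
            pthread_create(&t[i], NULL, reader, (void *)i);

        pthread_mutex_lock(&lock);
        data_ready = 1;                     /* producer wrote an event     */
        pthread_cond_broadcast(&waiters);   /* wake_up_all(&work->waiters) */
        pthread_mutex_unlock(&lock);

        for (int i = 0; i < 2; i++)
            pthread_join(t[i], NULL);
        return 0;
    }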
|
/linux-4.1.27/Documentation/ioctl/ |
D | botching-up-ioctls.txt | 118 waiters.
|
/linux-4.1.27/Documentation/scsi/ |
D | scsi_eh.txt | 185 3. Wakes up waiters on shost->host_wait. This occurs if someone
|