Searched refs:queued (Results 1 - 200 of 970) sorted by relevance


/linux-4.1.27/drivers/gpu/drm/
drm_flip_work.c:60 list_add_tail(&task->node, &work->queued); drm_flip_work_queue_task()
89 * drm_flip_work_commit - commit queued work
91 * @wq: the work-queue to run the queued work on
93 * Trigger work previously queued by drm_flip_work_queue() to run
96 * prior), and then from vblank irq commit the queued work.
104 list_splice_tail(&work->queued, &work->commited); drm_flip_work_commit()
105 INIT_LIST_HEAD(&work->queued); drm_flip_work_commit()
148 INIT_LIST_HEAD(&work->queued); drm_flip_work_init()
165 WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited)); drm_flip_work_cleanup()
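The drm_flip_work matches show a two-list handoff: drm_flip_work_queue_task() appends to the queued list, and drm_flip_work_commit() splices queued onto commited under the lock before kicking the worker. A minimal sketch of that splice pattern, using invented names (flip_lists, flip_lists_commit) rather than the real DRM types; the "commited" spelling follows the source:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    /* Illustrative only: mirrors the queued/commited handoff above,
     * not the real struct drm_flip_work. */
    struct flip_lists {
        spinlock_t lock;
        struct list_head queued;    /* filled by producers */
        struct list_head commited;  /* drained by the worker */
    };

    static void flip_lists_commit(struct flip_lists *work)
    {
        unsigned long flags;

        spin_lock_irqsave(&work->lock, flags);
        /* Hand everything queued so far to the worker side... */
        list_splice_tail(&work->queued, &work->commited);
        /* ...and restart the queued list for later producers. */
        INIT_LIST_HEAD(&work->queued);
        spin_unlock_irqrestore(&work->lock, flags);
    }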
/linux-4.1.27/net/x25/
x25_in.c:206 int queued = 0; x25_state3_machine() local
273 queued = 1; x25_state3_machine()
311 queued = !sock_queue_rcv_skb(sk, skb); x25_state3_machine()
315 queued = 1; x25_state3_machine()
326 return queued; x25_state3_machine()
384 int queued = 0, frametype, ns, nr, q, d, m; x25_process_rx_frame() local
393 queued = x25_state1_machine(sk, skb, frametype); x25_process_rx_frame()
396 queued = x25_state2_machine(sk, skb, frametype); x25_process_rx_frame()
399 queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m); x25_process_rx_frame()
402 queued = x25_state4_machine(sk, skb, frametype); x25_process_rx_frame()
408 return queued; x25_process_rx_frame()
413 int queued = x25_process_rx_frame(sk, skb); x25_backlog_rcv() local
415 if (!queued) x25_backlog_rcv()
x25_dev.c:56 int queued = 1; x25_receive_data() local
61 queued = x25_process_rx_frame(sk, skb); x25_receive_data()
63 queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf); x25_receive_data()
67 return queued; x25_receive_data()
x25_out.c:21 * 2000-11-10 Henner Eisen x25_send_iframe(): re-queued frames
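The x25 matches (and the rose, ax25, netrom, lapb and dccp sections further down) all follow one convention: each state handler returns a queued flag saying whether it consumed the skb, and the receive path frees the buffer only when nothing queued it. A hedged sketch of that dispatch shape, with invented ex_* stand-ins for the real handlers:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    /* ex_get_state() and ex_state3_machine() are hypothetical stand-ins
     * for the per-protocol state handlers matched above. */
    static int ex_get_state(struct sock *sk);
    static int ex_state3_machine(struct sock *sk, struct sk_buff *skb);

    static int ex_process_rx_frame(struct sock *sk, struct sk_buff *skb)
    {
        int queued = 0;

        switch (ex_get_state(sk)) {
        case 3:
            queued = ex_state3_machine(sk, skb);
            break;
        default:
            break;
        }
        return queued;      /* nonzero: some queue now owns the skb */
    }

    static int ex_backlog_rcv(struct sock *sk, struct sk_buff *skb)
    {
        if (!ex_process_rx_frame(sk, skb))
            kfree_skb(skb); /* nothing queued it, so free it here */
        return 0;
    }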
/linux-4.1.27/include/trace/events/
workqueue.h:28 * workqueue_queue_work - called when a work gets queued
33 * This event occurs when a work is queued immediately or once a
34 * delayed work is actually queued on a workqueue (ie: once the delay
69 * This event occurs when a queued work is put on the active queue,
rcu.h:428 * number of lazy callbacks queued, and the fourth element is the
429 * total number of callbacks queued.
464 * the fourth argument is the number of lazy callbacks queued, and the
465 * fifth argument is the total number of callbacks queued.
498 * the second is the number of lazy callbacks queued, the third is
499 * the total number of callbacks queued, and the fourth argument is
589 * queued, the fourth argument (nr) is the return value of need_resched(),
/linux-4.1.27/include/drm/
drm_flip_work.h:47 * @val: value queued via drm_flip_work_queue()
69 * @queued: queued tasks
71 * @lock: lock to access queued and commited lists
77 struct list_head queued; member in struct:drm_flip_work
drm_dp_mst_helper.h:349 /* msg is queued to be put into a slot */
444 the mstb tx_slots and txmsg->state once they are queued */
/linux-4.1.27/include/linux/
dynamic_queue_limits.h:10 * 1) Objects are queued up to some limit specified as number of objects.
13 * 3) Starvation occurs when limit has been reached, all queued data has
16 * 4) Minimizing the amount of queued data is desirable.
23 * dql_avail - returns how many objects are available to be queued based
42 unsigned int num_queued; /* Total ever queued */
69 * Record number of objects queued. Assumes that caller has already checked
88 /* Returns how many objects can be queued, < 0 indicates over limit. */ dql_avail()
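dynamic_queue_limits.h defines the byte-queue-limits helpers network drivers build on: record objects with dql_queued() on enqueue, stop when dql_avail() goes negative, and credit completions with dql_completed(). A rough usage sketch; the dql_* calls are the real API, while toy_xmit()/toy_tx_complete() are hypothetical driver hooks:

    #include <linux/dynamic_queue_limits.h>
    #include <linux/errno.h>

    static int toy_xmit(struct dql *dql, unsigned int pkt_bytes)
    {
        dql_queued(dql, pkt_bytes); /* account bytes handed to the HW */
        if (dql_avail(dql) < 0)
            return -EBUSY;          /* over limit: caller stops the queue */
        return 0;
    }

    static void toy_tx_complete(struct dql *dql, unsigned int done_bytes)
    {
        /* Credits completed bytes and retunes the limit, re-opening
         * the queue once enough queued data has drained. */
        dql_completed(dql, done_bytes);
    }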
errno.h:30 #define EIOCBQUEUED 529 /* iocb queued, will get completion event */
irq_work.h:11 * pending next, 3 -> {busy} : queued, pending callback
of_pdt.h:5 * Copyright (C) 2010 Andres Salomon <dilinger@queued.net>
signal.h:13 * Real Time signals may be queued.
385 * from TASK_STOPPED state and also clears any pending/queued stop signals
388 * any pending/queued SIGCONT signals; this happens regardless of blocking,
kthread.h:59 * can be queued and flushed using queue/flush_kthread_work()
blk-mq.h:50 unsigned long queued; member in struct:blk_mq_hw_ctx
140 BLK_MQ_RQ_QUEUE_OK = 0, /* queued fine */
dlm.h:116 * 0 if request is successfully queued for processing
159 * 0 if request is successfully queued for processing
hdlcdrv.h:150 /* queued skb for transmission */
tty_ldisc.h:25 * buffers of any input characters it may have queued to be
31 * discipline may have queued up to be delivered to the user mode
workqueue.h:339 * any specific CPU, not concurrency managed, and all queued works are
404 * most one work item at any given time in the queued order. They are
525 * queued and leaves it in the same position on the kernel-global
input.h:115 * @num_vals: number of values queued in the current frame
116 * @max_vals: maximum number of values queued in a frame
117 * @vals: array of values queued in the current frame
ccp.h:45 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
55 * The cmd has been successfully queued if:
/linux-4.1.27/net/rose/
rose_in.c:107 int queued = 0; rose_state3_machine() local
169 queued = 1; rose_state3_machine()
206 return queued; rose_state3_machine()
266 int queued = 0, frametype, ns, nr, q, d, m; rose_process_rx_frame() local
275 queued = rose_state1_machine(sk, skb, frametype); rose_process_rx_frame()
278 queued = rose_state2_machine(sk, skb, frametype); rose_process_rx_frame()
281 queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m); rose_process_rx_frame()
284 queued = rose_state4_machine(sk, skb, frametype); rose_process_rx_frame()
287 queued = rose_state5_machine(sk, skb, frametype); rose_process_rx_frame()
293 return queued; rose_process_rx_frame()
/linux-4.1.27/net/ax25/
ax25_std_in.c:146 int queued = 0; ax25_std_state3_machine() local
228 queued = ax25_rx_iframe(ax25, skb); ax25_std_state3_machine()
261 return queued; ax25_std_state3_machine()
271 int queued = 0; ax25_std_state4_machine() local
383 queued = ax25_rx_iframe(ax25, skb); ax25_std_state4_machine()
416 return queued; ax25_std_state4_machine()
424 int queued = 0, frametype, ns, nr, pf; ax25_std_frame_in() local
430 queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type); ax25_std_frame_in()
433 queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type); ax25_std_frame_in()
436 queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type); ax25_std_frame_in()
439 queued = ax25_std_state4_machine(ax25, skb, frametype, ns, nr, pf, type); ax25_std_frame_in()
445 return queued; ax25_std_frame_in()
ax25_ds_in.c:150 int queued = 0; ax25_ds_state3_machine() local
243 queued = ax25_rx_iframe(ax25, skb); ax25_ds_state3_machine()
276 return queued; ax25_ds_state3_machine()
284 int queued = 0, frametype, ns, nr, pf; ax25_ds_frame_in() local
290 queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type); ax25_ds_frame_in()
293 queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type); ax25_ds_frame_in()
296 queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type); ax25_ds_frame_in()
300 return queued; ax25_ds_frame_in()
ax25_in.c:107 int queued = 0; ax25_rx_iframe() local
149 queued = 1; ax25_rx_iframe()
155 return queued; ax25_rx_iframe()
163 int queued = 0; ax25_process_rx_frame() local
171 queued = ax25_std_frame_in(ax25, skb, type); ax25_process_rx_frame()
177 queued = ax25_ds_frame_in(ax25, skb, type); ax25_process_rx_frame()
179 queued = ax25_std_frame_in(ax25, skb, type); ax25_process_rx_frame()
184 return queued; ax25_process_rx_frame()
309 * Process the frame. If it is queued up internally it ax25_rcv()
/linux-4.1.27/block/
blk-throttle.c:37 * To avoid such starvation, dispatched bios are queued separately
42 * throtl_qnode is used to keep the queued bios separated by their sources.
43 * Bios are queued to throtl_qnode which in turn is queued to
47 * belongs to a throtl_grp and gets queued on itself or the parent, so
49 * queued and decrementing when dequeued is enough to keep the whole blkg
53 struct list_head node; /* service_queue->queued[] */
54 struct bio_list bios; /* queued bios */
62 * Bios queued directly to this service_queue or dispatched from
65 struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ member in struct:throtl_service_queue
66 unsigned int nr_queued[2]; /* number of queued bios */
74 unsigned int nr_pending; /* # queued in the tree */
108 * qnode_on_self is used when bios are directly queued to this
159 /* Total Number of queued bios on READ and WRITE lists */
317 * @queued: the service_queue->queued[] list @qn belongs to
319 * Add @bio to @qn and put @qn on @queued if it's not already on.
324 struct list_head *queued) throtl_qnode_add_bio()
328 list_add_tail(&qn->node, queued); throtl_qnode_add_bio()
335 * @queued: the qnode list to peek
337 static struct bio *throtl_peek_queued(struct list_head *queued) throtl_peek_queued() argument
339 struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node); throtl_peek_queued()
342 if (list_empty(queued)) throtl_peek_queued()
352 * @queued: the qnode list to pop a bio from
355 * Pop the first bio from the qnode list @queued. After popping, the first
356 * qnode is removed from @queued if empty or moved to the end of @queued so
364 static struct bio *throtl_pop_queued(struct list_head *queued, throtl_pop_queued() argument
367 struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node); throtl_pop_queued()
370 if (list_empty(queued)) throtl_pop_queued()
383 list_move_tail(&qn->node, queued); throtl_pop_queued()
393 INIT_LIST_HEAD(&sq->queued[0]); throtl_service_queue_init()
394 INIT_LIST_HEAD(&sq->queued[1]); throtl_service_queue_init()
915 * queued in the group bio list. So one should not be calling tg_may_dispatch()
917 * queued. tg_may_dispatch()
920 bio != throtl_peek_queued(&tg->service_queue.queued[rw])); tg_may_dispatch()
1030 * If @tg doesn't currently have any bios queued in the same throtl_add_bio_tg()
1038 throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); throtl_add_bio_tg()
1050 if ((bio = throtl_peek_queued(&sq->queued[READ]))) tg_update_disptime()
1053 if ((bio = throtl_peek_queued(&sq->queued[WRITE]))) tg_update_disptime()
1092 bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); tg_dispatch_one_bio()
1101 * bio_lists[] and decrease total number queued. The caller is tg_dispatch_one_bio()
1109 &parent_sq->queued[rw]); tg_dispatch_one_bio()
1130 while ((bio = throtl_peek_queued(&sq->queued[READ])) && throtl_dispatch_tg()
1140 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && throtl_dispatch_tg()
1186 * pending and queued on the service_queue's pending_tree and expires when
1257 * This function is queued for execution when bio's reach the bio_lists[]
1276 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL))) blk_throtl_dispatch_work_fn()
1522 /* throtl is FIFO - if bios are already queued, should queue */ blk_throtl_bio()
1534 * We need to trim slice even when bios are not being queued blk_throtl_bio()
1535 * otherwise it might happen that a bio is not queued for blk_throtl_bio()
1539 * low rate and * newly queued IO gets a really long dispatch blk_throtl_bio()
1542 * So keep on trimming slice even if bio is not queued. blk_throtl_bio()
1559 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", blk_throtl_bio()
1597 * Dispatch all bios from all children tg's queued on @parent_sq. On
1611 while ((bio = throtl_peek_queued(&sq->queued[READ]))) tg_drain_bios()
1613 while ((bio = throtl_peek_queued(&sq->queued[WRITE]))) tg_drain_bios()
1653 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
323 throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, struct list_head *queued) throtl_qnode_add_bio() argument
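The throtl_pop_queued() matches encode a small fairness trick: after popping a bio, the qnode is deleted if its list is empty, otherwise rotated to the tail so another source is served next. A reduced sketch of that rotation (simplified: it drops the tg_to_put blkg reference handling the real function carries):

    #include <linux/bio.h>
    #include <linux/list.h>

    struct toy_qnode {                  /* reduced throtl_qnode */
        struct list_head node;
        struct bio_list bios;
    };

    static struct bio *toy_pop_queued(struct list_head *queued)
    {
        struct toy_qnode *qn;
        struct bio *bio;

        if (list_empty(queued))
            return NULL;

        qn = list_first_entry(queued, struct toy_qnode, node);
        bio = bio_list_pop(&qn->bios);

        if (bio_list_empty(&qn->bios))
            list_del_init(&qn->node);          /* this source is drained */
        else
            list_move_tail(&qn->node, queued); /* rotate: round-robin */

        return bio;
    }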
blk-flush.c:171 bool queued = false, kicked; blk_flush_complete_seq() local
192 queued = blk_flush_queue_rq(rq, true); blk_flush_complete_seq()
216 return kicked | queued; blk_flush_complete_seq()
223 bool queued = false; flush_end_io() local
247 queued |= blk_flush_complete_seq(rq, fq, seq, error); list_for_each_entry_safe()
261 if (queued || fq->flush_queue_delayed) {
cfq-iosched.c:116 /* requests queued in sort_list */
117 int queued[2]; member in struct:cfq_queue
185 /* number of IOs queued up */
186 struct blkg_rwstat queued; member in struct:cfqg_stats
194 /* sum of number of ios queued across all samples */
204 /* total time with empty current active q with other requests queued */
540 if (blkg_rwstat_total(&stats->queued)) cfqg_stats_set_start_empty_time()
584 blkg_rwstat_total(&stats->queued)); cfqg_stats_update_avg_queue_size()
647 blkg_rwstat_add(&cfqg->stats.queued, rw, 1); cfqg_stats_update_io_add()
663 blkg_rwstat_add(&cfqg->stats.queued, rw, -1); cfqg_stats_update_io_remove()
695 /* queued stats shouldn't be cleared */ cfqg_stats_reset()
716 /* queued stats shouldn't be cleared */ cfqg_stats_merge()
917 * if a queue is marked sync and has sync io queued. A sync queue with async
1531 blkg_rwstat_init(&stats->queued); cfqg_stats_init()
1925 .private = offsetof(struct cfq_group, stats.queued),
1967 .private = offsetof(struct cfq_group, stats.queued),
2225 BUG_ON(!cfqq->queued[sync]); cfq_del_rq_rb()
2226 cfqq->queued[sync]--; cfq_del_rq_rb()
2249 cfqq->queued[rq_is_sync(rq)]++; cfq_add_rq_rb()
2274 cfqq->queued[rq_is_sync(rq)]--; cfq_reposition_rq_rb()
2416 * Lookup the cfqq that this bio will be queued with and allow cfq_allow_merge()
2417 * merge only if rq is queued there. cfq_allow_merge()
3757 if (cfqq->queued[0] + cfqq->queued[1] >= 4) cfq_update_idle_window()
3982 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] < cfq_update_hw_tag()
4105 * - queues with still some requests queued cfq_completed_request()
4139 * does not necessarily imply that a request actually will be queued. cfq_may_queue()
blk-mq-sysfs.c:180 return sprintf(page, "%lu\n", hctx->queued); blk_mq_hw_sysfs_queued_show()
275 .attr = {.name = "queued", .mode = S_IRUGO },
blk-mq.c:774 int queued; __blk_mq_run_hw_queue() local
808 queued = 0; __blk_mq_run_hw_queue()
823 queued++; __blk_mq_run_hw_queue()
848 if (!queued) __blk_mq_run_hw_queue()
850 else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1))) __blk_mq_run_hw_queue()
851 hctx->dispatched[ilog2(queued) + 1]++; __blk_mq_run_hw_queue()
878 * BLK_MQ_CPU_WORK_BATCH queued items.
1234 hctx->queued++; blk_mq_map_request()
/linux-4.1.27/net/netrom/
nr_in.c:156 int queued = 0; nr_state3_machine() local
229 queued = 1; nr_state3_machine()
276 return queued; nr_state3_machine()
283 int queued = 0, frametype; nr_process_rx_frame() local
292 queued = nr_state1_machine(sk, skb, frametype); nr_process_rx_frame()
295 queued = nr_state2_machine(sk, skb, frametype); nr_process_rx_frame()
298 queued = nr_state3_machine(sk, skb, frametype); nr_process_rx_frame()
304 return queued; nr_process_rx_frame()
/linux-4.1.27/virt/kvm/
async_pf.c:134 vcpu->async_pf.queued = 0; kvm_clear_async_pf_completion_queue()
153 vcpu->async_pf.queued--; kvm_check_async_pf_completion()
163 if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU) kvm_setup_async_pf()
195 vcpu->async_pf.queued++; kvm_setup_async_pf()
223 vcpu->async_pf.queued++; kvm_async_pf_wakeup_all()
/linux-4.1.27/net/dccp/
input.c:49 int queued = 0; dccp_rcv_close() local
80 queued = 1; dccp_rcv_close()
90 return queued; dccp_rcv_close()
95 int queued = 0; dccp_rcv_closereq() local
105 return queued; dccp_rcv_closereq()
117 queued = 1; dccp_rcv_closereq()
124 return queued; dccp_rcv_closereq()
528 int queued = 0; dccp_rcv_respond_partopen_state_process() local
565 queued = 1; /* packet was queued dccp_rcv_respond_partopen_state_process()
571 return queued; dccp_rcv_respond_partopen_state_process()
580 int queued = 0; dccp_rcv_state_process() local
670 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); dccp_rcv_state_process()
671 if (queued >= 0) dccp_rcv_state_process()
672 return queued; dccp_rcv_state_process()
683 queued = dccp_rcv_respond_partopen_state_process(sk, skb, dccp_rcv_state_process()
701 if (!queued) { dccp_rcv_state_process()
ccid.h:138 * Congestion control of queued data packets via CCID decision.
141 * queued packet may be sent, using the return code of ccid_hc_tx_send_packet().
/linux-4.1.27/drivers/staging/media/davinci_vpfe/
vpfe_video.h:31 * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
32 * if there was no buffer previously queued.
45 /* indicates that buffer is not queued */
47 /* indicates that buffer is queued */
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
debug.h:144 * @queued: Total MPDUs (non-aggr) queued
146 * @a_aggr: Total no. of aggregates queued
147 * @a_queued_hw: Total AMPDUs queued to hardware
148 * @a_queued_sw: Total AMPDUs queued to software queues
170 u32 queued; member in struct:ath_tx_stats
htc_drv_debug.c:201 "%20s : %10u\n", "Buffers queued", read_file_xmit()
207 "%20s : %10u\n", "SKBs queued", read_file_xmit()
216 "%20s : %10u\n", "CAB queued", read_file_xmit()
220 "%20s : %10u\n", "BE queued", read_file_xmit()
223 "%20s : %10u\n", "BK queued", read_file_xmit()
226 "%20s : %10u\n", "VI queued", read_file_xmit()
229 "%20s : %10u\n", "VO queued", read_file_xmit()
/linux-4.1.27/drivers/dma/
sirf-dma.c:66 struct list_head queued; member in struct:sirfsoc_dma_chan
112 /* Execute all queued DMA descriptors */ sirfsoc_dma_execute()
124 sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, sirfsoc_dma_execute()
126 /* Move the first queued descriptor to active list */ sirfsoc_dma_execute()
176 /* Execute queued descriptors */ sirfsoc_dma_irq()
178 if (!list_empty(&schan->queued)) sirfsoc_dma_irq()
275 list_move_tail(&sdesc->node, &schan->queued); sirfsoc_dma_tx_submit()
325 list_splice_tail_init(&schan->queued, &schan->free); sirfsoc_dma_terminate_all()
429 BUG_ON(!list_empty(&schan->queued)); sirfsoc_dma_free_chan_resources()
453 if (list_empty(&schan->active) && !list_empty(&schan->queued)) sirfsoc_dma_issue_pending()
735 INIT_LIST_HEAD(&schan->queued); sirfsoc_dma_probe()
mpc512x_dma.c:207 struct list_head queued; member in struct:mpc_dma_chan
254 * Execute all queued DMA descriptors.
259 * c) mchan->queued list contains at least one entry.
269 while (!list_empty(&mchan->queued)) { mpc_dma_execute()
270 mdesc = list_first_entry(&mchan->queued, mpc_dma_execute()
346 /* Execute queued descriptors */ mpc_dma_irq_process()
348 if (!list_empty(&mchan->queued)) mpc_dma_irq_process()
485 list_move_tail(&mdesc->node, &mchan->queued); mpc_dma_tx_submit()
487 /* If channel is idle, execute all queued descriptors */ mpc_dma_tx_submit()
571 BUG_ON(!list_empty(&mchan->queued)); mpc_dma_free_chan_resources()
856 list_splice_tail_init(&mchan->queued, &mchan->free); mpc_dma_device_terminate_all()
972 INIT_LIST_HEAD(&mchan->queued); mpc_dma_probe()
nbpfaxi.c:193 * @queued: list of queued descriptors
217 struct list_head queued; member in struct:nbpf_channel
579 if (list_empty(&chan->queued)) nbpf_issue_pending()
582 list_splice_tail_init(&chan->queued, &chan->active); nbpf_issue_pending()
623 list_for_each_entry(desc, &chan->queued, node) nbpf_tx_status()
651 list_add_tail(&desc->node, &chan->queued); nbpf_tx_submit()
819 list_splice_init(&chan->queued, &head); nbpf_chan_idle()
1031 INIT_LIST_HEAD(&chan->queued); nbpf_alloc_chan_resources()
1223 /* On error: abort all queued transfers, no callback */ nbpf_err_irq()
fsldma.h:157 * Descriptors which are queued to run, but have not yet been
/linux-4.1.27/drivers/media/platform/
fsl-viu.c:131 struct list_head queued; member in struct:viu_dmaqueue
353 if (list_empty(&vidq->queued)) restart_video_queue()
355 buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue); restart_video_queue()
539 if (!list_empty(&vidq->queued)) { buffer_queue()
542 dprintk(1, "vidq pointer 0x%p, queued 0x%p\n", buffer_queue()
543 vidq, &vidq->queued); buffer_queue()
544 dprintk(1, "dev %p, queued: self %p, next %p, head %p\n", buffer_queue()
545 dev, &vidq->queued, vidq->queued.next, buffer_queue()
546 vidq->queued.prev); buffer_queue()
547 list_add_tail(&buf->vb.queue, &vidq->queued); buffer_queue()
549 dprintk(2, "[%p/%d] buffer_queue - append to queued\n", buffer_queue()
573 list_add_tail(&buf->vb.queue, &vidq->queued); buffer_queue()
575 dprintk(2, "[%p/%d] buffer_queue - first queued\n", buffer_queue()
1063 /* launch another DMA operation for an active/queued buffer */ viu_activate_next_buf()
1067 dprintk(1, "start another queued buffer: 0x%p\n", buf); viu_activate_next_buf()
1069 } else if (!list_empty(&vidq->queued)) { viu_activate_next_buf()
1070 buf = list_entry(vidq->queued.next, struct viu_buf, viu_activate_next_buf()
1074 dprintk(1, "start another queued buffer: 0x%p\n", buf); viu_activate_next_buf()
1313 dprintk(1, "Open: list_empty queued=%d\n", viu_open()
1314 list_empty(&dev->vidq.queued)); viu_open()
1535 INIT_LIST_HEAD(&viu_dev->vidq.queued); viu_of_probe()
/linux-4.1.27/drivers/media/platform/omap3isp/
ispvideo.h:67 /* At least one buffer is queued on the input video node. */
69 /* At least one buffer is queued on the output video node. */
144 * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
145 * if there was no buffer previously queued.
ispccdc.h:122 * @underrun: A buffer underrun occurred and a new buffer has been queued
isppreview.h:118 * @underrun: Whether the preview entity has queued buffers on the output
/linux-4.1.27/drivers/staging/media/omap4iss/
iss_video.h:66 /* At least one buffer is queued on the input video node. */
68 /* At least one buffer is queued on the output video node. */
139 * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
140 * if there was no buffer previously queued.
iss_ipipeif.h:56 * @underrun: A buffer underrun occurred and a new buffer has been queued
/linux-4.1.27/fs/ocfs2/cluster/
tcp_internal.h:111 * connect_work is queued from set_nn_state both from hb up and from
119 /* this is queued as nodes come up and is canceled when a connection is
137 * queued. they should not be able to ref a freed sc. the teardown
147 * work is single-shot. the work is also queued from a sock
/linux-4.1.27/arch/mips/kvm/
interrupt.h:12 * MIPS Exception Priorities, exceptions (including interrupts) are queued up
/linux-4.1.27/kernel/sched/
idle_task.c:54 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) task_tick_idle() argument
stop_task.c:77 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) task_tick_stop() argument
core.c:1186 int running, queued; wait_task_inactive() local
1224 queued = task_on_rq_queued(p); wait_task_inactive()
1256 if (unlikely(queued)) { wait_task_inactive()
2820 * If we are going to sleep and we have plugged IO queued, sched_submit_work()
3000 int oldprio, queued, running, enqueue_flag = 0; rt_mutex_setprio() local
3029 queued = task_on_rq_queued(p); rt_mutex_setprio()
3031 if (queued) rt_mutex_setprio()
3073 if (queued) rt_mutex_setprio()
3084 int old_prio, delta, queued; set_user_nice() local
3105 queued = task_on_rq_queued(p); set_user_nice()
3106 if (queued) set_user_nice()
3115 if (queued) { set_user_nice()
3420 int retval, oldprio, oldpolicy = -1, queued, running; __sched_setscheduler() local
3616 queued = task_on_rq_queued(p); __sched_setscheduler()
3618 if (queued) __sched_setscheduler()
3628 if (queued) { __sched_setscheduler()
4730 * move_queued_task - move a queued task to new rq.
4900 bool queued, running; sched_setnuma() local
4903 queued = task_on_rq_queued(p); sched_setnuma()
4906 if (queued) sched_setnuma()
4915 if (queued) sched_setnuma()
7354 int queued; normalize_task() local
7356 queued = task_on_rq_queued(p); normalize_task()
7357 if (queued) normalize_task()
7360 if (queued) { normalize_task()
7543 int queued, running; sched_move_task() local
7550 queued = task_on_rq_queued(tsk); sched_move_task()
7552 if (queued) sched_move_task()
7569 tsk->sched_class->task_move_group(tsk, queued); sched_move_task()
7576 if (queued) sched_move_task()
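The sched/core.c hits all share one shape: snapshot whether the task is on a runqueue, dequeue it, change its attributes, then enqueue it again only if it was queued before. A schematic version; task_on_rq_queued() is real, but dequeue_task()/enqueue_task() are internal helpers whose signatures are approximated here, and all locking is elided:

    /* Pattern from rt_mutex_setprio()/set_user_nice()/sched_setnuma(). */
    static void change_task_attr(struct rq *rq, struct task_struct *p,
                                 void (*apply)(struct task_struct *p))
    {
        int queued = task_on_rq_queued(p);

        if (queued)
            dequeue_task(rq, p, 0);  /* off the runqueue first */

        apply(p);                    /* change prio/nice/policy/group */

        if (queued)
            enqueue_task(rq, p, 0);  /* back on, in its new place */
    }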
/linux-4.1.27/net/rds/
send.c:130 * - queued acks can be delayed behind large messages
132 * - small message latency is higher behind queued large messages
150 * sendmsg calls here after having queued its message on the send rds_send_xmit()
400 * not try and send their newly queued message. We need to check the rds_send_xmit()
667 * Transports call here when they've determined that the receiver queued
779 * we only want this to fire once so we use the callers 'queued'. It's
785 __be16 dport, int *queued) rds_send_queue_rm()
790 if (*queued) rds_send_queue_rm()
835 rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n", rds_send_queue_rm()
839 *queued = 1; rds_send_queue_rm()
844 return *queued; rds_send_queue_rm()
962 int queued = 0, allocated_mr = 0; rds_sendmsg() local
1066 dport, &queued)) { rds_sendmsg()
1082 &queued), rds_sendmsg()
1084 rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo); rds_sendmsg()
783 rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn, struct rds_message *rm, __be16 sport, __be16 dport, int *queued) rds_send_queue_rm() argument
af_rds.c:65 * messages shouldn't be queued.
162 * - A notification has been queued to the socket (this can be a congestion
connection.c:359 /* make sure lingering queued work won't try to ref the conn */ rds_conn_destroy()
363 /* tear down queued messages */ rds_conn_destroy()
/linux-4.1.27/include/kvm/
arm_arch_timer.h:47 /* Work queued with the above timer expires */
/linux-4.1.27/drivers/net/wimax/i2400m/
rx.c:94 * point, the signal to deliver the whole (or part) of the queued
101 * - reset queue: send all queued packets to the OS
105 * - update ws: update the queue's window start and deliver queued
109 * deliver queued packets that meet the criteria
176 * Goes over the list of queued reports in i2400m->rx_reports and
198 d_printf(1, dev, "processing queued reports\n"); i2400m_report_hook_work()
200 d_printf(2, dev, "processing queued report %p\n", args); i2400m_report_hook_work()
211 * Flush the list of queued reports
221 d_printf(1, dev, "flushing queued reports\n"); i2400m_report_hook_flush()
226 d_printf(2, dev, "flushing queued report %p\n", args); i2400m_report_hook_flush()
258 d_printf(2, dev, "queued report %p\n", args); i2400m_report_hook_queue()
389 * they are queued and at some point the queue is i2400m_rx_ctl()
464 * Reorder queue data stored on skb->cb while the skb is queued in the
484 * skb when queued here contains a 'struct i2400m_roq_data' were we
685 /* NSN bounds assumed correct (checked when it was queued) */ __i2400m_roq_queue()
701 /* NSN bounds assumed correct (checked when it was queued) */ __i2400m_roq_queue()
703 d_printf(2, dev, "ERX: roq %p - queued before %p " __i2400m_roq_queue()
718 /* NSN bounds assumed correct (checked when it was queued) */ __i2400m_roq_queue()
737 * to the networking stack all the queued skb's whose normalized
759 /* NSN bounds assumed correct (checked when it was queued) */ __i2400m_roq_update_ws()
/linux-4.1.27/net/decnet/
dn_nsp_in.c:222 * the incoming data is in the correct format before it is queued to
583 * also allows data and other data to be queued to a socket.
616 int queued = 0; dn_nsp_otherdata() local
629 queued = 1; dn_nsp_otherdata()
635 if (!queued) dn_nsp_otherdata()
641 int queued = 0; dn_nsp_data() local
655 queued = 1; dn_nsp_data()
666 if (!queued) dn_nsp_data()
825 * sock_release() when there is a backlog queued up.
/linux-4.1.27/drivers/isdn/pcbit/
layer2.c:279 * deliver a queued frame to the upper layer
348 /* discard previous queued frame */ pcbit_receive()
425 printk("Type 1 frame and no frame queued\n"); pcbit_receive()
468 if (chan->queued) { pcbit_fake_conf()
469 chan->queued--; pcbit_fake_conf()
660 * call pcbit_transmit to write possible queued frames
drv.c:320 chan->queued = 0; pcbit_block_timer()
348 if (chan->queued >= MAX_QUEUED) pcbit_xmit()
353 chan->queued); pcbit_xmit()
373 chan->queued++; pcbit_xmit()
509 if (chan->queued == MAX_QUEUED) { pcbit_l3_receive()
515 chan->queued--; pcbit_l3_receive()
pcbit.h:28 unsigned char queued; /* unacked data messages */ member in struct:pcbit_chan
/linux-4.1.27/kernel/locking/
rwsem-xadd.c:47 * 0xffff0000 (1) There are writers or readers queued but none active
135 * will block as they will notice the queued writer. __rwsem_do_wake()
233 /* If there are no active locks, wake the front queued process(es). rwsem_down_read_failed()
426 bool waiting = true; /* any queued threads before us */ rwsem_down_write_failed()
456 * If there were already threads queued before us and there are rwsem_down_write_failed()
458 * wake any read locks that were queued ahead of us. rwsem_down_write_failed()
osq_lock.c:33 * Can return NULL in case we were the last queued and we updated @lock instead.
55 * We were the last queued, we moved @lock back. @prev osq_wait_next()
/linux-4.1.27/drivers/tty/serial/
amba-pl011.c:143 bool queued; member in struct:pl011_dmatx_data
422 if (uap->dmatx.queued) pl011_dma_tx_callback()
436 * a TX buffer completing, we must update the tx queued status to pl011_dma_tx_callback()
441 uap->dmatx.queued = false; pl011_dma_tx_callback()
460 * 1 if we queued up a TX DMA buffer.
481 uap->dmatx.queued = false; pl011_dma_tx_refill()
513 uap->dmatx.queued = false; pl011_dma_tx_refill()
522 uap->dmatx.queued = false; pl011_dma_tx_refill()
543 uap->dmatx.queued = true; pl011_dma_tx_refill()
564 * true if we queued a DMA buffer
572 * If we already have a TX buffer queued, but received a pl011_dma_tx_irq()
576 if (uap->dmatx.queued) { pl011_dma_tx_irq()
585 * We don't have a TX buffer queued, so try to queue one. pl011_dma_tx_irq()
586 * If we successfully queued a buffer, mask the TX IRQ. pl011_dma_tx_irq()
602 if (uap->dmatx.queued) { pl011_dma_tx_stop()
610 * character queued for send, try to get that character out ASAP.
614 * true if we have a buffer queued
627 if (!uap->dmatx.queued) { pl011_dma_tx_start()
688 if (uap->dmatx.queued) {
691 uap->dmatx.queued = false;
1076 if (uap->dmatx.queued) { pl011_dma_shutdown()
1079 uap->dmatx.queued = false; pl011_dma_shutdown()
1253 * Returns true if the character was successfully queued to the FIFO.
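amba-pl011.c keeps a single dmatx.queued bool to remember whether a TX DMA descriptor is in flight: set when a buffer is submitted, cleared in the completion callback, checked before queueing another. A generic dmaengine sketch of that flag; struct toy_dmatx and its functions are invented, while the dmaengine_* calls are the real API:

    #include <linux/dmaengine.h>
    #include <linux/errno.h>

    struct toy_dmatx {
        struct dma_chan *chan;
        dma_addr_t buf;
        size_t len;
        bool queued;            /* a descriptor is currently in flight */
    };

    static void toy_dma_tx_callback(void *data)
    {
        struct toy_dmatx *tx = data;

        tx->queued = false;     /* next refill may queue again */
    }

    static int toy_dma_tx_refill(struct toy_dmatx *tx)
    {
        struct dma_async_tx_descriptor *desc;

        if (tx->queued)
            return -EBUSY;      /* only one TX buffer in flight */

        desc = dmaengine_prep_slave_single(tx->chan, tx->buf, tx->len,
                                           DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT);
        if (!desc)
            return -ENOMEM;

        desc->callback = toy_dma_tx_callback;
        desc->callback_param = tx;
        dmaengine_submit(desc);
        dma_async_issue_pending(tx->chan);
        tx->queued = true;
        return 0;
    }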
/linux-4.1.27/drivers/mfd/
cs5535-mfd.c:9 * Copyright (c) 2010 Andres Salomon <dilinger@queued.net>
190 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
/linux-4.1.27/drivers/net/wireless/rt2x00/
rt2x00dump.h:63 * @DUMP_FRAME_TX: This frame is queued for transmission to the hardware.
68 * @DUMP_FRAME_BEACON: This beacon frame is queued for transmission to the
/linux-4.1.27/arch/tile/include/hv/
netio_errors.h:63 * be queued until some of the queued packets are actually transmitted. */
/linux-4.1.27/lib/
dynamic_queue_limits.c:43 * when enqueuing it was possible that all queued data dql_completed()
62 * If there is slack, the amount of excess data queued above dql_completed()
/linux-4.1.27/drivers/media/pci/zoran/
zoran.h:108 BUZ_STATE_PEND, /* buffer is queued in pend[] ready to feed to I/O */
109 BUZ_STATE_DMA, /* buffer is queued in dma[] for I/O */
339 unsigned long jpg_que_head; /* Index where to put next buffer which is queued */
346 unsigned long jpg_queued_num; /* count of frames queued since grab/play started */
/linux-4.1.27/drivers/usb/dwc3/
gadget.h:75 req->queued = true; dwc3_gadget_move_request_queued()
/linux-4.1.27/include/net/netfilter/
nf_queue.h:8 /* Each queued (to userspace) skbuff has one of these. */
/linux-4.1.27/include/uapi/linux/
fanotify.h:13 #define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
meye.h:52 /* sync a previously queued mjpeg buffer */
mqueue.h:29 __kernel_long_t mq_curmsgs; /* number of messages currently queued */
blktrace_api.h:38 __BLK_TA_QUEUE = 1, /* queued */
inotify.h:44 #define IN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
dccp.h:208 /* DCCP priorities for outgoing/queued packets */
dlmconstants.h:68 * Force a conversion request to be queued, even if it is compatible with
sysctl.h:80 INOTIFY_MAX_QUEUED_EVENTS=3 /* max queued events per instance */
110 KERN_RTSIGNR=32, /* Number of rt sigs queued */
/linux-4.1.27/net/ipv6/netfilter/
nf_defrag_ipv6_hooks.c:68 /* queued */ ipv6_defrag()
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
nrs_fifo.c:59 * It schedules RPCs in the same order as they are queued from LNet.
186 * Adds request \a nrq to \a policy's list of queued requests
211 * Removes request \a nrq from \a policy's list of queued requests.
lproc_ptlrpc.c:572 * queued: 0
578 * queued: 2015
585 * queued: 0
591 * queued: 0
601 " queued: %-20d\n"
/linux-4.1.27/crypto/
chainiv.c:125 int queued; async_chainiv_schedule_work() local
137 queued = queue_work(kcrypto_wq, &ctx->postponed); async_chainiv_schedule_work()
138 BUG_ON(!queued); async_chainiv_schedule_work()
/linux-4.1.27/drivers/usb/gadget/udc/
mv_u3d.h:293 struct list_head queue; /* ep request queued hardware */
299 queued on hardware */
311 struct list_head queue; /* ep requst queued on hardware */
/linux-4.1.27/drivers/usb/serial/
garmin_gps.c:236 * (if yes, all queued data will be dropped)
324 /* free up all queued data */ pkt_clear()
440 queued data. */ gsp_rec_packet()
465 * If the input is an ack, just send the last queued packet to the
468 * if the input is an abort command, drop all queued data.
794 flush all queued data. */ nat_receive()
865 /* flush all queued data */ garmin_clear()
1305 * Sends the next queued packet to the tty port (garmin native mode only)
1306 * and then sets a timer to call itself again until all queued data
1354 /* in native mode send queued data to tty, in garmin_unthrottle()
1369 * The timer is currently only used to send queued packets to
1377 /* send the next queued packet to the tty port */ timeout_handler()
/linux-4.1.27/fs/notify/
notification.c:76 /* If the event is still queued, we have a problem... */ fsnotify_destroy_event()
84 * added to the queue, 1 if the event was merged with some other queued event,
101 /* Queue overflow event only if it isn't already queued */ fsnotify_add_event()
/linux-4.1.27/include/media/
videobuf2-core.h:141 * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver
142 * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
179 * @queued_entry: entry on the queued buffers list, which holds all
180 * buffers queued from userspace
264 * @buf_prepare: called every time the buffer is queued from userspace
271 * the buffer will not be queued in driver; optional.
289 * of already queued buffers in count parameter; driver
298 * many buffers have been queued up by userspace.
310 * pre-queued buffers before calling STREAMON.
364 * have been queued into the driver.
370 * @queued_list: list of buffers currently queued from userspace
371 * @queued_count: number of buffers queued and ready for streaming.
/linux-4.1.27/drivers/gpu/drm/omapdrm/
omap_crtc.c:57 /* list of queued apply's: */
60 /* for handling queued and in-progress applies: */
329 /* then handle the next round of of queued apply's: */ apply_worker()
334 apply->queued = false; apply_worker()
365 /* no need to queue it again if it is already queued: */ omap_crtc_apply()
366 if (apply->queued) omap_crtc_apply()
369 apply->queued = true; omap_crtc_apply()
/linux-4.1.27/fs/xfs/
xfs_mru_cache.c:114 unsigned int queued; /* work has been queued */ member in struct:xfs_mru_cache
215 if (!mru->queued) { _xfs_mru_cache_list_insert()
216 mru->queued = 1; _xfs_mru_cache_list_insert()
291 mru->queued = next; _xfs_mru_cache_reap()
292 if ((mru->queued > 0)) { _xfs_mru_cache_reap()
398 if (mru->queued) { xfs_mru_cache_flush()
/linux-4.1.27/drivers/media/v4l2-core/
v4l2-mem2mem.c:40 /* Instance is already queued on the job_queue */
57 * @job_queue: instances queued to run
197 * 1) at least one source buffer has to be queued,
198 * 2) at least one destination buffer has to be queued,
284 * 2] If the context is queued, then the context will be removed from
550 * There has to be at least one buffer queued on each queued_list, which v4l2_m2m_poll()
videobuf2-core.c:897 * queued without ever calling STREAMON. __reqbufs()
1163 * cannot use this buffer anymore until it is queued back to it by videobuf
1164 * by the means of buf_queue callback. Only buffers previously queued to the
1771 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
1783 * If any buffers were queued before streamon, vb2_start_streaming()
1856 * Add to the queued buffers list, a buffer will stay on it until vb2_internal_qbuf()
1888 * start_streaming() since not enough buffers were queued, and vb2_internal_qbuf()
1889 * we now have reached the minimum number of queued buffers, vb2_internal_qbuf()
2159 * Removes all queued buffers from driver's queue and all buffers queued by
2167 * Tell driver to stop all transactions and release all queued __vb2_queue_cancel()
2206 * Make sure to call buf_finish for any queued buffers. Normally __vb2_queue_cancel()
2296 * 2) passes any previously queued buffers to the driver and starts streaming
2342 * 2) stop streaming and dequeues any queued buffers, including those previously
2666 * buffers queued than there are buffers available. vb2_poll()
2767 unsigned int queued:1; member in struct:vb2_fileio_buf
2776 * @initial_index: in the read() case all buffers are queued up immediately
2779 * queued, instead whenever a buffer is full it is queued up by
2781 * been queued up will __vb2_perform_fileio() start to dequeue
2786 * available buffers have been queued and __vb2_perform_fileio()
2910 fileio->bufs[i].queued = 1; __vb2_init_fileio()
2913 * All buffers have been queued, so mark that by setting __vb2_init_fileio()
3030 buf->queued = 0; __vb2_perform_fileio()
3105 * Buffer has been queued, update the status __vb2_perform_fileio()
3108 buf->queued = 1; __vb2_perform_fileio()
3119 * queued for the first time (initial_index < q->num_buffers) __vb2_perform_fileio()
3121 * time we need to dequeue a buffer since we've now queued up __vb2_perform_fileio()
videobuf-core.c:211 dprintk(1, "busy: buffer #%d queued\n", i); videobuf_queue_is_busy()
272 /* remove queued buffers from list */ videobuf_queue_cancel()
566 dprintk(1, "qbuf: buffer is already queued or active.\n"); videobuf_qbuf()
/linux-4.1.27/drivers/net/fddi/skfp/h/
hwmtm.h:136 SMbuf *llc_rx_pipe ; /* points to the first queued llc fr */
137 SMbuf *llc_rx_tail ; /* points to the last queued llc fr */
138 int queued_rx_frames ; /* number of queued frames */
targetos.h:96 #define MAX_TX_QUEUE_LEN 20 // number of packets queued by driver
/linux-4.1.27/drivers/scsi/aacraid/
dpcsup.c:167 * This DPC routine will be queued when the adapter interrupts us to
197 * Allocate a FIB at all costs. For non queued stuff aac_command_normal()
297 * Allocate a FIB. For non queued stuff we can just use aac_intr_normal()
/linux-4.1.27/drivers/net/wan/
wanxl.h:62 #define DOORBELL_TO_CARD_TX_0 8 /* outbound packet queued */
/linux-4.1.27/drivers/media/platform/xilinx/
xilinx-dma.h:70 * @queued_bufs: list of queued buffers
xilinx-dma.c:284 * @queue: buffer list entry in the DMA engine queued buffers list
434 /* Give back all queued buffers to videobuf2. */ xvip_dma_start_streaming()
461 /* Give back all queued buffers to videobuf2. */ xvip_dma_stop_streaming()
/linux-4.1.27/drivers/media/usb/pvrusb2/
pvrusb2-io.h:31 pvr2_buffer_state_queued = 2, // Buffer has been queued for filling
pvrusb2-debugifc.c:180 " URBs: queued=%u idle=%u ready=%u" pvr2_debugifc_print_status()
/linux-4.1.27/drivers/staging/ft1000/ft1000-usb/
ft1000_usb.h:24 int NumOfMsg; /* number of messages queued up */
/linux-4.1.27/drivers/staging/unisys/virtpci/
virtpci.h:71 * rsps are queued & retrieved */
/linux-4.1.27/drivers/net/wireless/ath/
dfs_pri_detector.h:56 * @max_count: maximum number of pulses to be queued
/linux-4.1.27/drivers/net/wireless/b43legacy/
pio.h:69 /* Packets on the txqueue are queued,
/linux-4.1.27/drivers/usb/class/
cdc-acm.h:123 struct usb_anchor delayed; /* writes queued for a device about to be woken */
/linux-4.1.27/include/net/
inetpeer.h:45 * Once inet_peer is queued for deletion (refcnt == -1), following field
/linux-4.1.27/include/linux/sunrpc/
metrics.h:65 ktime_t om_queue, /* queued for xmit */
/linux-4.1.27/ipc/
compat_mq.c:21 compat_long_t mq_curmsgs; /* number of messages currently queued */
/linux-4.1.27/kernel/
task_work.c:46 * Find the last queued pending work with ->func == @func and remove
smp.c:69 * The IPIs for the smp-call-function callbacks queued by other hotplug_cfd()
201 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
204 * Flush any pending smp-call-function callbacks queued on this CPU. This is
209 * Loop through the call_single_queue and run all the queued callbacks.
255 * Handle irq works queued remotely by irq_work_queue_on().
stop_machine.c:49 * the stoppers could get queued up in reverse order, leading to
223 * This guarantees that both work1 and work2 get queued, before
277 * queued its stop_machine works and therefore ours will get executed stop_two_cpus()
291 * that works are always queued in the same order on every CPU. stop_two_cpus()
kthread.c:543 * kthread_worker without an attached kthread simply collects queued
605 * if @work was successfully queued, %false if it was already pending.
639 * If @work is queued or executing, wait for it to finish execution.
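kthread.c's matches describe kthread_worker: queued kthread_work items executed in order by one dedicated thread. A minimal sketch using the 4.1-era names (init_kthread_worker() and friends were later renamed to kthread_init_worker() etc.); toy_* identifiers are invented:

    #include <linux/kthread.h>
    #include <linux/err.h>

    static struct kthread_worker toy_worker;
    static struct kthread_work toy_work;

    static void toy_work_fn(struct kthread_work *work)
    {
        pr_info("ran in the dedicated worker thread\n");
    }

    static int toy_start(void)
    {
        struct task_struct *task;

        init_kthread_worker(&toy_worker);
        task = kthread_run(kthread_worker_fn, &toy_worker, "toy-worker");
        if (IS_ERR(task))
            return PTR_ERR(task);

        init_kthread_work(&toy_work, toy_work_fn);
        /* Returns false if the work was already queued. */
        queue_kthread_work(&toy_worker, &toy_work);
        return 0;
    }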
workqueue.c:193 * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
606 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
607 * contain the pointer to the queued pwq. Once execution starts, the flag
617 * queued anywhere after initialization until it is sync canceled. pwq is
618 * available only while the work item is queued.
680 * a @work is not queued in a hope, that CPU#1 will eventually set_work_pool_and_clear_pending()
681 * finish the queued @work. Meanwhile CPU#1 does not see set_work_pool_and_clear_pending()
1231 * guaranteed that the timer is not queued anywhere and not try_to_grab_pending()
1243 * The queueing is in progress, or it is already queued. Try to try_to_grab_pending()
1253 * item is queued on pwq->wq, and both updating work->data to point try_to_grab_pending()
1257 * item is currently queued on that pool. try_to_grab_pending()
1276 /* work->data points to pwq iff queued, point to pool */ try_to_grab_pending()
1326 * Test whether @work is being queued from another work executing on the
1353 * queued or lose PENDING. Grabbing PENDING and queueing should __queue_work()
1376 * running there, in which case the work needs to be queued on that __queue_work()
1557 * Return: %false if @dwork was idle and queued, %true if @dwork was
2066 * PENDING and queued state changes happen together while IRQ is
2247 * workqueues which have works queued on the pool and let them process
2274 * pwq(s) queued. This can happen by non-rescuer workers consuming rescuer_thread()
2389 * Currently, a queued barrier can't be canceled. This is because
2514 * This function sleeps until all work items which were queued on entry
2880 * queued can't be destroyed before this function returns.
2895 * Delayed timer is cancelled and the pending work is queued for
3020 * need to know that a particular work item isn't queued and isn't running.
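The workqueue.c comments above revolve around the PENDING bit: a work item is queued at most once, and while queued its data word points at the pool_workqueue it sits on. From the caller's side that contract surfaces through the plain API below (standard workqueue calls; toy_* names are invented):

    #include <linux/workqueue.h>

    static void toy_handler(struct work_struct *work)
    {
        pr_info("work ran\n");
    }
    static DECLARE_WORK(toy_work, toy_handler);

    static void toy_kick(void)
    {
        /* schedule_work() returns false when PENDING was already set:
         * a work item sits on at most one queue at a time. */
        if (!schedule_work(&toy_work))
            pr_debug("toy_work already queued\n");
    }

    static void toy_teardown(void)
    {
        /* Waits for a queued or running instance to finish. */
        cancel_work_sync(&toy_work);
    }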
/linux-4.1.27/include/linux/dma/
ipu-dma.h:165 struct list_head queue; /* queued tx-descriptors */
/linux-4.1.27/include/linux/mfd/
ipaq-micro.h:80 * @node: list node if message gets queued
/linux-4.1.27/arch/ia64/sn/kernel/
bte_error.c:27 * transfers to be queued.
189 * from being queued. bte_error_handler()
/linux-4.1.27/fs/btrfs/
delayed-ref.h:74 * reference count modifications we've queued up.
138 /* how many delayed ref updates we've queued, used by the
/linux-4.1.27/net/sctp/
primitive.c:96 * Gracefully closes an association. Any locally queued user data
112 * Ungracefully closes an association. Any locally queued user data
/linux-4.1.27/tools/usb/ffs-aio-example/simple/device_app/
aio_simple.c:332 if (ret >= 0) { /* if ret > 0 request is queued */ main()
346 if (ret >= 0) { /* if ret > 0 request is queued */ main()
/linux-4.1.27/drivers/s390/char/
con3215.c:86 struct raw3215_req *queued_read; /* pointer to queued read requests */
87 struct raw3215_req *queued_write;/* pointer to queued write requests */
139 * If there is a queued read request it is used, but that shouldn't happen
151 /* no queued read request, use new req structure */ raw3215_mk_read_req()
168 * buffer to the 3215 device. If a queued write exists it is replaced by
179 /* check if there is a queued write request */ raw3215_mk_write_req()
182 /* no queued write request, use new req structure */ raw3215_mk_write_req()
sclp.c:41 /* List of queued requests. */
94 /* Timer for queued requests. */
269 * Timeout handler for queued requests. Removes request from list and
323 /* Try to start queued requests. */
sclp.h:146 #define SCLP_REQ_QUEUED 0x01 /* request is queued to be processed */
/linux-4.1.27/net/sunrpc/
svc_xprt.c:328 bool queued = false; svc_xprt_do_enqueue() local
358 * Once the xprt has been queued, it can only be dequeued by svc_xprt_do_enqueue()
363 if (!queued) { svc_xprt_do_enqueue()
391 if (!queued) { svc_xprt_do_enqueue()
392 queued = true; svc_xprt_do_enqueue()
517 /* skip any that aren't queued */ svc_wake_up()
651 /* was a socket queued? */ rqst_should_sleep()
685 * had to be queued, don't allow the thread to wait so svc_get_next_xprt()
1092 dprintk("revisit queued\n"); svc_revisit()
/linux-4.1.27/drivers/mailbox/
mailbox.c:233 * non-negative token is returned for each queued request. If the request
234 * is not queued, a negative token is returned. Upon failure or successful
374 /* The queued TX requests are simply aborted, no callbacks are made */ mbox_free_channel()
/linux-4.1.27/arch/metag/kernel/
smp.c:452 * KICK interrupts are queued in hardware so we'll get for_each_cpu()
461 * and another until we've handled all the queued for_each_cpu()
468 * queued a KICK interrupt for 'msg'. for_each_cpu()
/linux-4.1.27/drivers/usb/host/whci/
whci-hc.h:213 * @stds: list of sTDs queued to this qset
214 * @ntds: number of qTDs queued (not necessarily the same as nTDs
227 * (smaller) transfers may be queued in a qset.
/linux-4.1.27/net/lapb/
lapb_in.c:254 int queued = 0; lapb_state3_machine() local
408 queued = 1; lapb_state3_machine()
470 if (!queued) lapb_state3_machine()
/linux-4.1.27/drivers/isdn/act2000/
act2000.h:122 short queued; /* User-Data Bytes in TX queue */ member in struct:act2000_chan
capi.c:486 chan->queued = 0; actcapi_disconnect_b3_resp()
585 * Decrement queued-bytes counter.
609 chan->queued -= m->msg.data_b3_req.datalen; handle_ack()
613 if (chan->queued < 0) handle_ack()
614 chan->queued = 0; handle_ack()
/linux-4.1.27/drivers/isdn/gigaset/
ser-gigaset.c:102 * transmit first queued command buffer
130 gig_dbg(DEBUG_OUTPUT, "send_cb: sent %d, left %u, queued %u", send_cb()
203 * throw away all data queued for sending
242 * number of bytes queued, or error code < 0
/linux-4.1.27/drivers/isdn/sc/
message.h:29 queued */
/linux-4.1.27/drivers/scsi/sym53c8xx_2/
sym53c8xx.h:183 * Max number of IO control blocks queued to the controller.
/linux-4.1.27/drivers/s390/net/
smsgiucv_app.c:208 /* cancel pending work and flush any queued event work */ smsgiucv_app_exit()
/linux-4.1.27/drivers/staging/lustre/lnet/selftest/
conrpc.h:91 struct list_head tas_rpcs_list; /* queued requests */
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
osc_io.c:113 int queued = 0; osc_io_submit() local
173 if (++queued == max_pages) { cl_page_list_for_each_safe()
174 queued = 0; cl_page_list_for_each_safe()
182 if (queued > 0)
/linux-4.1.27/drivers/staging/olpc_dcon/
olpc_dcon_xo_1.c:7 * Copyright (c) 2010 Andres Salomon <dilinger@queued.net>
/linux-4.1.27/drivers/of/
pdt.c:10 * Adapted for multiple architectures by Andres Salomon <dilinger@queued.net>
/linux-4.1.27/drivers/net/wireless/b43/
pio.h:77 /* The number of packets that can still get queued.
/linux-4.1.27/drivers/clocksource/
cs5535-clockevt.c:189 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
/linux-4.1.27/arch/x86/platform/olpc/
olpc-xo1-pm.c:4 * Copyright (C) 2010 Andres Salomon <dilinger@queued.net>
/linux-4.1.27/drivers/vfio/
virqfd.c:65 * queued for release, as determined by testing whether the virqfd_wakeup()
/linux-4.1.27/fs/ocfs2/dlm/
dlmthread.c:553 /* possible that another ast was queued while dlm_flush_asts()
556 mlog(0, "%s: res %.*s, AST queued while flushing last " dlm_flush_asts()
605 /* possible that another bast was queued while dlm_flush_asts()
608 mlog(0, "%s: res %.*s, BAST queued while flushing last " dlm_flush_asts()
dlmast.c:55 /* Should be called as an ast gets queued to see if the new
57 * For example, if dlm_thread queued a bast for an EX lock that
dlmconvert.c:294 "owner has already queued and sent ast to me. res %.*s, "
327 * need to wait for a reply as to whether it got queued or not. */ dlmconvert_remote()
/linux-4.1.27/fs/ufs/
ufs.h:30 int work_queued; /* non-zero if the delayed work is queued */
/linux-4.1.27/fs/ncpfs/
ncp_fs_sb.h:134 struct list_head requests; /* STREAM only: queued requests */
/linux-4.1.27/include/scsi/
scsi_transport_srp.h:82 * timer if the device on which it has been queued is blocked.
/linux-4.1.27/net/llc/
llc_if.c:36 * will be locked and received frames and expired timers will be queued.
llc_conn.c:257 /* any PDUs to re-send are queued up; start sending to MAC */ llc_conn_resend_i_pdu_as_cmd()
298 /* any PDUs to re-send are queued up; start sending to MAC */ llc_conn_resend_i_pdu_as_rsp()
341 * llc_conn_send_pdus - Sends queued PDUs
344 * Sends queued pdus to MAC layer for transmission.
854 * @skb: queued rx frame or event
/linux-4.1.27/net/netfilter/
nf_queue.c:72 /* Drop reference to owner of hook which queued us. */ nf_queue_entry_release_refs()
nfnetlink_queue_core.c:69 * Following fields are dirtied for each queued packet,
640 unsigned int queued; nfqnl_enqueue_packet() local
678 queued = 0; nfqnl_enqueue_packet()
686 queued++; nfqnl_enqueue_packet()
692 if (queued) { nfqnl_enqueue_packet()
693 if (err) /* some segments are already queued */ nfqnl_enqueue_packet()
/linux-4.1.27/sound/hda/
hdac_bus.c:133 * process queued unsolicited events
/linux-4.1.27/tools/perf/bench/
futex-wake.c:7 * in non-error situations: all waiters are queued and all wake calls wakeup
/linux-4.1.27/drivers/staging/unisys/include/
uisqueue.h:38 * responses are queued
62 * wait for the queue to become non-full. If command is queued, return
/linux-4.1.27/drivers/staging/lustre/lustre/include/lustre/
lustre_errno.h:199 #define LUSTRE_EIOCBQUEUED 529 /* iocb queued, will get completion
201 #define LUSTRE_EIOCBRETRY 530 /* iocb queued, will trigger a retry */
/linux-4.1.27/drivers/crypto/ccp/
ccp-crypto-main.c:176 /* Since we have already queued the cmd, we must indicate that ccp_crypto_complete()
212 /* Check if the cmd can/should be queued */ ccp_crypto_enqueue_cmd()
ccp-dev.c:77 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
87 * The cmd has been successfully queued if:
ccp-dev.h:209 * Master lists that all cmds are queued on. Because there can be
/linux-4.1.27/drivers/usb/host/
imx21-dbg.c:359 "queued for ETD: %lu\n" debug_statistics_show_one()
360 "queued for DMEM: %lu\n\n", debug_statistics_show_one()
imx21-hcd.c:45 * Non ISOC transfers are queued if either ETDs or Data memory are unavailable
404 /* Memory now available for a queued ETD - activate it */ activate_queued_etd()
413 dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n", activate_queued_etd()
451 /* Try again to allocate memory for anything we've queued */ free_dmem()
481 /* Endpoint now idle - release its ETD(s) or assign to queued request */ ep_idle()
503 "assigning idle etd %d for queued request\n", etd_num); ep_idle()
512 dev_err(imx21->dev, "No urb for queued ep!\n"); ep_idle()
1226 "no ETD available already queued %p\n", imx21_hc_urb_enqueue()
imx21-hcd.h:425 struct list_head queue_for_etd; /* eps queued due to etd shortage */
426 struct list_head queue_for_dmem; /* etds queued due to dmem shortage */
ohci-q.c:252 // FIXME if there are TDs queued, fail them! ed_schedule()
307 * - ED_OPER: when there's any request queued, the ED gets rescheduled
586 * processed as soon as they're queued.
862 * then we need to leave the control STATUS packet queued ed_halted()
1077 * If no TDs are queued, take ED off the ed_rm_list. finish_unlinks()
/linux-4.1.27/drivers/usb/wusbcore/
wa-nep.c:193 dev_err(dev, "Too many notifications queued, " wa_nep_queue()
218 * queued to the workqueue.
/linux-4.1.27/drivers/iommu/
intel_irq_remapping.c:645 * If the queued invalidation is already initialized, for_each_iommu()
657 * Disable intr remapping and queued invalidation, if already for_each_iommu()
679 * Enable queued invalidation for all the DRHD's.
685 printk(KERN_ERR "DRHD %Lx: failed to enable queued, " for_each_iommu()
1258 /* Enable queued invalidation */ dmar_ir_add()
1263 pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n", dmar_ir_add()
dmar.c:1183 * Submit the queued invalidation descriptor to the remapping
1367 * Enable queued invalidation.
1408 * queued invalidation is already setup and enabled. dmar_enable_qi()
1709 * First disable queued invalidation. dmar_reenable_qi()
1713 * Then enable queued invalidation again. Since there is no pending dmar_reenable_qi()
1714 * invalidation requests now, it's safe to re-enable queued dmar_reenable_qi()
/linux-4.1.27/net/mac80211/
tx.c:497 * been queued to pending queue. No reordering can happen, go ieee80211_tx_h_unicast_ps_buf()
529 * We queued up some frames, so the TIM bit might ieee80211_tx_h_unicast_ps_buf()
1065 bool queued = false; ieee80211_tx_prep_agg() local
1105 queued = true; ieee80211_tx_prep_agg()
1123 return queued; ieee80211_tx_prep_agg()
1185 bool queued; ieee80211_tx_prepare() local
1187 queued = ieee80211_tx_prep_agg(tx, skb, info, ieee80211_tx_prepare()
1190 if (unlikely(queued)) ieee80211_tx_prepare()
1380 * Returns false if the frame couldn't be transmitted but was queued instead.
1447 * frame was dropped or queued.
1540 * Returns false if the frame couldn't be transmitted but was queued instead.
1645 return; /* skb queued: don't free */ ieee80211_xmit()
2487 * Returns false if the frame couldn't be transmitted but was queued instead,
2488 * which in this case means re-queued -- take as an indication to stop sending
/linux-4.1.27/drivers/spi/
spi.c:954 * spi_get_next_queued_message() - called by driver to check for queued
956 * @master: the master to check for queued messages
1120 * spi_queued_transfer - transfer function for queued transfers
1122 * @msg: spi message which is to handled is queued to driver queue
1143 master->queued = true; spi_master_initialize_queue()
1567 /* If we're using a queued driver, start the queue */ spi_register_master()
1647 if (master->queued) { spi_unregister_master()
1665 /* Basically no-ops for non-queued masters */ spi_master_suspend()
1666 if (!master->queued) spi_master_suspend()
1681 if (!master->queued) spi_master_resume()
1735 * Context: can sleep, and no requests are queued to the device
1961 * no other spi_message queued to that device will be processed.
2013 * no other spi_message queued to that device will be processed.
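spi.c's queued-master matches describe the message queue that spi_queued_transfer() drains in order. From a client's perspective the queueing is just spi_async(): build a message, queue it, get called back when the pump completes it. A small hedged client-side sketch; the spi_* and completion calls are the real API, struct toy_spi_req is invented:

    #include <linux/spi/spi.h>
    #include <linux/completion.h>
    #include <linux/string.h>

    /* Storage must stay valid until msg->complete runs. */
    struct toy_spi_req {
        struct spi_transfer xfer;
        struct spi_message msg;
        struct completion done;
    };

    static void toy_spi_complete(void *context)
    {
        complete(&((struct toy_spi_req *)context)->done);
    }

    static int toy_spi_queue_tx(struct spi_device *spi,
                                struct toy_spi_req *req,
                                const void *buf, size_t len)
    {
        init_completion(&req->done);

        memset(&req->xfer, 0, sizeof(req->xfer));
        req->xfer.tx_buf = buf;
        req->xfer.len = len;

        spi_message_init(&req->msg);
        spi_message_add_tail(&req->xfer, &req->msg);
        req->msg.complete = toy_spi_complete;
        req->msg.context = req;

        /* Lands on the master's queue; the message pump picks it up. */
        return spi_async(spi, &req->msg);
    }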
spi-sc18is602.c:51 int tlen; /* Data queued for tx in buffer */
/linux-4.1.27/drivers/media/platform/s5p-mfc/
s5p_mfc_common.h:505 * @src_queue_cnt: number of buffers queued on the source internal queue
506 * @dst_queue_cnt: number of buffers queued on the dest internal queue
532 * @dec_dst_flag: flags for buffers queued in the hardware
/linux-4.1.27/drivers/scsi/aic7xxx/
aic7xxx_osm.h:261 * queued to the device.
267 * transactions that can be queued to
318 * How many transactions have been queued
aic79xx_osm.h:252 * queued to the device.
258 * transactions that can be queued to
314 * How many transactions have been queued
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_user_sdma.c:58 u32 counter; /* sdma pkts queued counter for this entry */
75 * pkts sent to dma engine are queued on this
89 /* as packets go on the queued queue, they are counted... */
/linux-4.1.27/drivers/isdn/hysdn/
boardergo.c:70 /* the card after booting. The task may be queued from everywhere */
150 /* enable or disable the cards error log. The event is queued if possible */
hysdn_sched.c:179 hysdn_addlog(card, "async tx-cfg data queued"); hysdn_tx_cfgline()
/linux-4.1.27/drivers/macintosh/
via-maciisi.c:529 /* Do any queued requests now */ maciisi_interrupt()
639 /* Do any queued requests now if possible */ maciisi_interrupt()
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/
htc.h:300 * receive direction, however, the buffer when queued up
544 HTC_SEND_QUEUE_OK = 0, /* packet was queued */
/linux-4.1.27/drivers/scsi/
hosts.c:605 * 1 - work queued for execution
606 * 0 - work is already queued
/linux-4.1.27/drivers/pci/hotplug/
pciehp_ctrl.c:489 "Link Down event queued on slot(%s): currently getting powered on\n", handle_link_event()
498 "Link Up event queued on slot(%s): currently getting powered off\n", handle_link_event()
/linux-4.1.27/drivers/gpu/drm/i915/
intel_audio.c:227 * rest of the setup into a vblank work item, queued here, but the hsw_audio_codec_enable()
332 * rest of the setup into a vblank work item, queued here, but the ilk_audio_codec_enable()
/linux-4.1.27/drivers/block/drbd/
drbd_req.h:63 * It may be queued for sending.
90 /* An empty flush is queued as P_BARRIER,
/linux-4.1.27/drivers/target/tcm_fc/
tfc_cmd.c:182 * been re-queued by target-core. ft_queue_status()
273 /* XXX need to find cmd if queued */ ft_recv_seq()
/linux-4.1.27/drivers/tty/
tty_buffer.c:111 * Remove all the buffers pending on a tty whether queued with data
166 have queued and recycle that ? */ tty_buffer_alloc()
/linux-4.1.27/drivers/usb/dwc2/
hcd_intr.c:115 * transactions may be queued to the DWC_otg controller for the current
116 * (micro)frame. Periodic transactions may be queued to the controller
770 * be queued when there is space in the request queue.
811 * Move the QH from the periodic queued schedule to dwc2_halt_channel()
813 * halt to be queued when the periodic schedule is dwc2_halt_channel()
861 * the channel is released. This allows transactions to be queued dwc2_complete_non_periodic_xfer()
1218 * queued as request queue space is available. dwc2_hc_nak_intr()
1970 dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n", dwc2_hc_n_intr()
/linux-4.1.27/fs/ocfs2/dlmfs/
userdlm.c:319 * several basts in a row, we might be queued by the first user_dlm_unblock_lock()
320 * one, the unblock thread might run and clear the queued user_dlm_unblock_lock()
/linux-4.1.27/fs/nfs/
unlink.c:319 /* Cancel a queued async unlink. Called when a sillyrename run fails. */
464 * queued async unlink if it failed.
/linux-4.1.27/net/irda/irlan/
irlan_client.c:202 /* Check if we have some queued commands waiting to be sent */ irlan_client_ctrl_data_indication()
228 /* Remove frames queued on the control channel */ irlan_client_ctrl_disconnect_indication()
/linux-4.1.27/drivers/xen/xenbus/
xenbus_dev_frontend.c:120 /* Read out any raw xenbus messages queued up. */ xenbus_file_read()
181 * multiple queued buffers on a temporary local list, and then add it
/linux-4.1.27/drivers/usb/misc/
usbtest.c:223 /* Support for testing basic non-queued I/O streams.
473 /* We use scatterlist primitives to test queued I/O.
1006 * (a) queues work for control, keeping N subtests queued and
1140 /* signal completion when nothing's queued */ ctrl_complete()
1732 * with lots back-to-back queued requests. ctrl_out()
2121 /* Simple non-queued bulk I/O tests */ usbtest_ioctl()
2260 /* non-queued sanity tests for control (chapter 9 subset) */ usbtest_ioctl()
2273 /* queued control messaging */ usbtest_ioctl()
2283 /* simple non-queued unlinks (ring with one urb) */ usbtest_ioctl()
2468 "unlink queued writes failed %d, " usbtest_ioctl()
2475 /* Simple non-queued interrupt I/O tests */ usbtest_ioctl()
/linux-4.1.27/drivers/isdn/hisax/
hfc_usb.h:115 {-ENXIO, "URB already queued"},
/linux-4.1.27/drivers/iio/common/ssp_sensors/
ssp.h:184 * @pending_list: pending list for messages queued to be sent/read
/linux-4.1.27/drivers/mmc/core/
sdio_cis.c:328 * not going to be queued for a driver. sdio_read_cis()
/linux-4.1.27/drivers/mtd/
ftl.c:611 int queued, ret; reclaim_block() local
618 queued = 0; reclaim_block()
629 queued = 1; reclaim_block()
649 if (queued) { reclaim_block()
/linux-4.1.27/drivers/media/pci/cx23885/
cx23885-vbi.c:176 * This is the risc program of the first buffer to be queued if the active list
/linux-4.1.27/drivers/net/wireless/ath/ath10k/
htt.h:841 /* Num HTT cookies queued to dispatch list */
847 /* Num MSDU queued to WAL */
856 /* Num Local frames queued */
862 /* Num queued to HW */
/linux-4.1.27/drivers/rapidio/switches/
tsi57x.c:230 /* Remove any queued packets by locking/unlocking port */ tsi57x_em_handler()
/linux-4.1.27/drivers/net/wireless/ti/wlcore/
event.c:259 * if the work is already queued, it should take place. wl12xx_for_each_wlvif_sta()
/linux-4.1.27/drivers/net/wireless/ath/carl9170/
fwdesc.h:81 /* Firmware will pass BA when BARs are queued */
/linux-4.1.27/drivers/net/wireless/brcm80211/include/
brcmu_utils.h:69 u16 max; /* maximum number of queued packets */
/linux-4.1.27/drivers/gpio/
gpio-cs5535.c:377 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
/linux-4.1.27/drivers/input/touchscreen/
mc13783_ts.c:53 * be queued for future execution (it rearms itself) it will not mc13783_ts_handler()

Completed in 4468 milliseconds
