
Searched refs:queued (Results 1 – 166 of 166) sorted by relevance

/linux-4.1.27/net/rose/
rose_in.c:107 int queued = 0; in rose_state3_machine() local
169 queued = 1; in rose_state3_machine()
206 return queued; in rose_state3_machine()
266 int queued = 0, frametype, ns, nr, q, d, m; in rose_process_rx_frame() local
275 queued = rose_state1_machine(sk, skb, frametype); in rose_process_rx_frame()
278 queued = rose_state2_machine(sk, skb, frametype); in rose_process_rx_frame()
281 queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m); in rose_process_rx_frame()
284 queued = rose_state4_machine(sk, skb, frametype); in rose_process_rx_frame()
287 queued = rose_state5_machine(sk, skb, frametype); in rose_process_rx_frame()
293 return queued; in rose_process_rx_frame()
/linux-4.1.27/net/x25/
x25_in.c:206 int queued = 0; in x25_state3_machine() local
273 queued = 1; in x25_state3_machine()
311 queued = !sock_queue_rcv_skb(sk, skb); in x25_state3_machine()
315 queued = 1; in x25_state3_machine()
326 return queued; in x25_state3_machine()
384 int queued = 0, frametype, ns, nr, q, d, m; in x25_process_rx_frame() local
393 queued = x25_state1_machine(sk, skb, frametype); in x25_process_rx_frame()
396 queued = x25_state2_machine(sk, skb, frametype); in x25_process_rx_frame()
399 queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m); in x25_process_rx_frame()
402 queued = x25_state4_machine(sk, skb, frametype); in x25_process_rx_frame()
[all …]
x25_dev.c:56 int queued = 1; in x25_receive_data() local
61 queued = x25_process_rx_frame(sk, skb); in x25_receive_data()
63 queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf); in x25_receive_data()
67 return queued; in x25_receive_data()
/linux-4.1.27/net/dccp/
input.c:49 int queued = 0; in dccp_rcv_close() local
80 queued = 1; in dccp_rcv_close()
90 return queued; in dccp_rcv_close()
95 int queued = 0; in dccp_rcv_closereq() local
105 return queued; in dccp_rcv_closereq()
117 queued = 1; in dccp_rcv_closereq()
124 return queued; in dccp_rcv_closereq()
528 int queued = 0; in dccp_rcv_respond_partopen_state_process() local
565 queued = 1; /* packet was queued in dccp_rcv_respond_partopen_state_process()
571 return queued; in dccp_rcv_respond_partopen_state_process()
[all …]
/linux-4.1.27/net/netrom/
nr_in.c:156 int queued = 0; in nr_state3_machine() local
229 queued = 1; in nr_state3_machine()
276 return queued; in nr_state3_machine()
283 int queued = 0, frametype; in nr_process_rx_frame() local
292 queued = nr_state1_machine(sk, skb, frametype); in nr_process_rx_frame()
295 queued = nr_state2_machine(sk, skb, frametype); in nr_process_rx_frame()
298 queued = nr_state3_machine(sk, skb, frametype); in nr_process_rx_frame()
304 return queued; in nr_process_rx_frame()
/linux-4.1.27/net/ax25/
ax25_std_in.c:146 int queued = 0; in ax25_std_state3_machine() local
228 queued = ax25_rx_iframe(ax25, skb); in ax25_std_state3_machine()
261 return queued; in ax25_std_state3_machine()
271 int queued = 0; in ax25_std_state4_machine() local
383 queued = ax25_rx_iframe(ax25, skb); in ax25_std_state4_machine()
416 return queued; in ax25_std_state4_machine()
424 int queued = 0, frametype, ns, nr, pf; in ax25_std_frame_in() local
430 queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type); in ax25_std_frame_in()
433 queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type); in ax25_std_frame_in()
436 queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type); in ax25_std_frame_in()
[all …]
ax25_ds_in.c:150 int queued = 0; in ax25_ds_state3_machine() local
243 queued = ax25_rx_iframe(ax25, skb); in ax25_ds_state3_machine()
276 return queued; in ax25_ds_state3_machine()
284 int queued = 0, frametype, ns, nr, pf; in ax25_ds_frame_in() local
290 queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type); in ax25_ds_frame_in()
293 queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type); in ax25_ds_frame_in()
296 queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type); in ax25_ds_frame_in()
300 return queued; in ax25_ds_frame_in()
ax25_in.c:107 int queued = 0; in ax25_rx_iframe() local
149 queued = 1; in ax25_rx_iframe()
155 return queued; in ax25_rx_iframe()
163 int queued = 0; in ax25_process_rx_frame() local
171 queued = ax25_std_frame_in(ax25, skb, type); in ax25_process_rx_frame()
177 queued = ax25_ds_frame_in(ax25, skb, type); in ax25_process_rx_frame()
179 queued = ax25_std_frame_in(ax25, skb, type); in ax25_process_rx_frame()
184 return queued; in ax25_process_rx_frame()
/linux-4.1.27/drivers/gpu/drm/
drm_flip_work.c:60 list_add_tail(&task->node, &work->queued); in drm_flip_work_queue_task()
104 list_splice_tail(&work->queued, &work->commited); in drm_flip_work_commit()
105 INIT_LIST_HEAD(&work->queued); in drm_flip_work_commit()
148 INIT_LIST_HEAD(&work->queued); in drm_flip_work_init()
165 WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited)); in drm_flip_work_cleanup()
/linux-4.1.27/block/
blk-throttle.c:65 struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ member
324 struct list_head *queued) in throtl_qnode_add_bio() argument
328 list_add_tail(&qn->node, queued); in throtl_qnode_add_bio()
337 static struct bio *throtl_peek_queued(struct list_head *queued) in throtl_peek_queued() argument
339 struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node); in throtl_peek_queued()
342 if (list_empty(queued)) in throtl_peek_queued()
364 static struct bio *throtl_pop_queued(struct list_head *queued, in throtl_pop_queued() argument
367 struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node); in throtl_pop_queued()
370 if (list_empty(queued)) in throtl_pop_queued()
383 list_move_tail(&qn->node, queued); in throtl_pop_queued()
[all …]
blk-flush.c:171 bool queued = false, kicked; in blk_flush_complete_seq() local
192 queued = blk_flush_queue_rq(rq, true); in blk_flush_complete_seq()
216 return kicked | queued; in blk_flush_complete_seq()
223 bool queued = false; in flush_end_io() local
247 queued |= blk_flush_complete_seq(rq, fq, seq, error); in flush_end_io()
261 if (queued || fq->flush_queue_delayed) { in flush_end_io()
cfq-iosched.c:117 int queued[2]; member
186 struct blkg_rwstat queued; member
540 if (blkg_rwstat_total(&stats->queued)) in cfqg_stats_set_start_empty_time()
584 blkg_rwstat_total(&stats->queued)); in cfqg_stats_update_avg_queue_size()
647 blkg_rwstat_add(&cfqg->stats.queued, rw, 1); in cfqg_stats_update_io_add()
663 blkg_rwstat_add(&cfqg->stats.queued, rw, -1); in cfqg_stats_update_io_remove()
1531 blkg_rwstat_init(&stats->queued); in cfqg_stats_init()
1925 .private = offsetof(struct cfq_group, stats.queued),
1967 .private = offsetof(struct cfq_group, stats.queued),
2225 BUG_ON(!cfqq->queued[sync]); in cfq_del_rq_rb()
[all …]
blk-mq.c:774 int queued; in __blk_mq_run_hw_queue() local
808 queued = 0; in __blk_mq_run_hw_queue()
823 queued++; in __blk_mq_run_hw_queue()
848 if (!queued) in __blk_mq_run_hw_queue()
850 else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1))) in __blk_mq_run_hw_queue()
851 hctx->dispatched[ilog2(queued) + 1]++; in __blk_mq_run_hw_queue()
1234 hctx->queued++; in blk_mq_map_request()
blk-mq-sysfs.c:180 return sprintf(page, "%lu\n", hctx->queued); in blk_mq_hw_sysfs_queued_show()
/linux-4.1.27/virt/kvm/
async_pf.c:134 vcpu->async_pf.queued = 0; in kvm_clear_async_pf_completion_queue()
153 vcpu->async_pf.queued--; in kvm_check_async_pf_completion()
163 if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU) in kvm_setup_async_pf()
195 vcpu->async_pf.queued++; in kvm_setup_async_pf()
223 vcpu->async_pf.queued++; in kvm_async_pf_wakeup_all()
/linux-4.1.27/fs/xfs/
xfs_mru_cache.c:114 unsigned int queued; /* work has been queued */ member
215 if (!mru->queued) { in _xfs_mru_cache_list_insert()
216 mru->queued = 1; in _xfs_mru_cache_list_insert()
291 mru->queued = next; in _xfs_mru_cache_reap()
292 if ((mru->queued > 0)) { in _xfs_mru_cache_reap()
398 if (mru->queued) { in xfs_mru_cache_flush()
/linux-4.1.27/Documentation/usb/
ohci.txt:18 - interrupt transfers can be larger, and can be queued
24 types can be queued. That was also true in "usb-ohci", except for interrupt
26 to overhead in IRQ processing. When interrupt transfers are queued, those
bulk-streams.txt:6 queued at once.
17 Once a buffer has been queued to a stream ring, the device is notified (through
URB.txt:23 queued the requested action.
129 It immediately returns, either with status 0 (request queued) or some
135 - Too many queued ISO transfers (-EAGAIN)
241 earlier, if you always keep at least one URB queued and your completion
usbmon.txt:250 u32 queued;
254 The member "queued" refers to the number of events currently queued in the
ehci.txt:67 Transfers of all types can be queued. This means that control transfers
/linux-4.1.27/drivers/soc/ti/
Kconfig:16 Packets are queued/de-queued by writing/reading descriptor address
/linux-4.1.27/Documentation/networking/
rds.txt:76 a socket. A message is queued when sendmsg is called, and
101 of queued bytes over the SO_SNDSIZE threshold will return
108 Receives a message that was queued to this socket. The sockets
125 incoming message queued to the socket, or a pending notification,
130 there's room on the send queue (ie the number of bytes queued
141 This allows the application to discard all messages queued to a
226 number of bytes queued equals or exceeds rcvbuf then the socket
269 This is then queued for the individual connection and sent by the
273 the transport to the general code and queued by the general code
298 Dropping a connection while packets are queued will cause queued or
ipvlan.txt:40 slave device and packets are switched and queued to the master device to send
48 used before packets are queued on the outbound device. In this mode the slaves
tcp.txt:72 one that has been queued already. To add a frame we throw it on the end. Ack
94 Frames are queued for output by tcp_write. We do our best to send the frames
multiqueue.txt:45 will be queued to the band associated with the hardware queue.
netlink_mmap.txt:191 RX ring only: user-space queued the message for later processing, but
281 /* Frame queued to socket receive queue */
netdev-FAQ.txt:114 Q: How can I tell what patches are queued up for backporting to the
142 if it is already queued. If not, then send a mail to netdev, listing
scaling.txt:132 and the packet is queued to the tail of that CPU’s backlog queue. At
134 packets have been queued to their backlog queue. The IPI wakes backlog
135 processing on the remote CPU, and any queued packets are then processed
timestamping.txt:132 scheduler. In that case timestamps will be queued onto the error
319 however, the full packet is queued, taking up budget from SO_RCVBUF.
ppp_generic.txt:180 fragment if it doesn't have any fragments currently queued up for it
182 the fragment is queued up for the channel to transmit later. This
altera_tse.txt:75 intialization. Receive buffers may or may not be queued depending upon the
stmmac.txt:59 are not queued so the driver has to scan all the descriptors in the ring during
ip-sysctl.txt:91 queued for each unresolved address by other network layers.
97 The maximum number of packets which may be queued for each
666 result in a large amount of packets queued in qdisc/device
/linux-4.1.27/Documentation/ABI/testing/
sysfs-class-net-queues:67 Indicates the current limit of bytes allowed to be queued
77 queued on this network device transmit queue. See
86 queued on this network device transmit queue. Default value is
sysfs-ata:99 queued: Drive supports queued DSM TRIM
sysfs-block-dm:32 reasonable merge candidate can be queued on the request
sysfs-class-net-statistics:127 transmitted packets or all packets that have been queued for
sysfs-power:195 the sleep state represented by that string is queued up. This
/linux-4.1.27/Documentation/
workqueue.txt:32 When a new work item gets queued, the worker begins executing again.
86 off of the queue, one after the other. If no work is queued, the
92 which manages worker-pools and processes the queued work items.
96 worker-pools to serve work items queued on unbound workqueues - the
107 When a work item is queued to a workqueue, the target worker-pool is
111 be queued on the worklist of either normal or highpri worker-pool that
148 on code paths that handle memory reclaim are required to be queued on
171 Work items queued to an unbound wq are served by the special
201 Work items of a highpri wq are queued to the highpri
251 behavior. Work items on such wq are always queued to the unbound
[all …]
SubmitChecklist:92 that it still works with all of the other queued patches and various
vgaarbiter.txt:183 Airlie finally put this work in shape and queued to Jesse Barnes' PCI tree.
iostats.txt:65 ultimately handed to the disk, and so it will be counted (and queued)
kernel-per-CPU-kthreads.txt:246 another way of preventing any callbacks from being queued on the
kmemcheck.txt:369 441 * queued once. Changing the restart behaviour to
kernel-parameters.txt:3056 Set threshold of queued RCU callbacks beyond which
3060 Set threshold of queued RCU callbacks below which
4000 By default, all work items queued to unbound
memory-barriers.txt:2273 queued on the semaphore, by virtue of it having a piece of its stack linked to
2311 Once it has queued itself and dropped the semaphore lock, the waiter does not
/linux-4.1.27/drivers/dma/
sirf-dma.c:66 struct list_head queued; member
124 sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, in sirfsoc_dma_execute()
178 if (!list_empty(&schan->queued)) in sirfsoc_dma_irq()
275 list_move_tail(&sdesc->node, &schan->queued); in sirfsoc_dma_tx_submit()
325 list_splice_tail_init(&schan->queued, &schan->free); in sirfsoc_dma_terminate_all()
429 BUG_ON(!list_empty(&schan->queued)); in sirfsoc_dma_free_chan_resources()
453 if (list_empty(&schan->active) && !list_empty(&schan->queued)) in sirfsoc_dma_issue_pending()
735 INIT_LIST_HEAD(&schan->queued); in sirfsoc_dma_probe()
mpc512x_dma.c:207 struct list_head queued; member
269 while (!list_empty(&mchan->queued)) { in mpc_dma_execute()
270 mdesc = list_first_entry(&mchan->queued, in mpc_dma_execute()
348 if (!list_empty(&mchan->queued)) in mpc_dma_irq_process()
485 list_move_tail(&mdesc->node, &mchan->queued); in mpc_dma_tx_submit()
571 BUG_ON(!list_empty(&mchan->queued)); in mpc_dma_free_chan_resources()
856 list_splice_tail_init(&mchan->queued, &mchan->free); in mpc_dma_device_terminate_all()
972 INIT_LIST_HEAD(&mchan->queued); in mpc_dma_probe()
nbpfaxi.c:217 struct list_head queued; member
579 if (list_empty(&chan->queued)) in nbpf_issue_pending()
582 list_splice_tail_init(&chan->queued, &chan->active); in nbpf_issue_pending()
623 list_for_each_entry(desc, &chan->queued, node) in nbpf_tx_status()
651 list_add_tail(&desc->node, &chan->queued); in nbpf_tx_submit()
819 list_splice_init(&chan->queued, &head); in nbpf_chan_idle()
1031 INIT_LIST_HEAD(&chan->queued); in nbpf_alloc_chan_resources()
/linux-4.1.27/net/decnet/
dn_nsp_in.c:616 int queued = 0; in dn_nsp_otherdata() local
629 queued = 1; in dn_nsp_otherdata()
635 if (!queued) in dn_nsp_otherdata()
641 int queued = 0; in dn_nsp_data() local
655 queued = 1; in dn_nsp_data()
666 if (!queued) in dn_nsp_data()
/linux-4.1.27/drivers/media/platform/
fsl-viu.c:131 struct list_head queued; member
353 if (list_empty(&vidq->queued)) in restart_video_queue()
355 buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue); in restart_video_queue()
539 if (!list_empty(&vidq->queued)) { in buffer_queue()
543 vidq, &vidq->queued); in buffer_queue()
545 dev, &vidq->queued, vidq->queued.next, in buffer_queue()
546 vidq->queued.prev); in buffer_queue()
547 list_add_tail(&buf->vb.queue, &vidq->queued); in buffer_queue()
573 list_add_tail(&buf->vb.queue, &vidq->queued); in buffer_queue()
1069 } else if (!list_empty(&vidq->queued)) { in viu_activate_next_buf()
[all …]
/linux-4.1.27/net/rds/
send.c:785 __be16 dport, int *queued) in rds_send_queue_rm() argument
790 if (*queued) in rds_send_queue_rm()
839 *queued = 1; in rds_send_queue_rm()
844 return *queued; in rds_send_queue_rm()
962 int queued = 0, allocated_mr = 0; in rds_sendmsg() local
1066 dport, &queued)) { in rds_sendmsg()
1082 &queued), in rds_sendmsg()
1084 rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo); in rds_sendmsg()
/linux-4.1.27/Documentation/video4linux/
pxa_camera.txt:16 When a buffer is queued (pxa_videobuf_ops->buf_queue), the QCI starts.
19 More buffers can be queued while the QCI is started without halting the
82 - first buffer queued for capture
83 Once a first buffer is queued for capture, the QCI is started, but data
videobuf:101 When a buffer is queued for I/O, it is passed to buf_queue(), which should
v4l2-framework.txt:486 sub-device. After registration events can be queued as usual on the
/linux-4.1.27/crypto/
chainiv.c:125 int queued; in async_chainiv_schedule_work() local
137 queued = queue_work(kcrypto_wq, &ctx->postponed); in async_chainiv_schedule_work()
138 BUG_ON(!queued); in async_chainiv_schedule_work()
/linux-4.1.27/include/drm/
drm_flip_work.h:77 struct list_head queued; member
/linux-4.1.27/Documentation/scsi/
hptiop.txt:84 All queued requests are handled via inbound/outbound queue port.
121 Non-queued requests (reset/flush etc) can be sent via inbound message
129 All queued requests are handled via inbound/outbound list.
160 Non-queued requests (reset communication/reset/flush etc) can be sent via PCIe
lpfc.txt:33 In older revisions of the lpfc driver, the driver internally queued i/o
38 the LLDD would simply be queued for a short duration, allowing the device
sym53c8xx_2.txt:228 maximum number of queued commands up to 32. The Symbios Setup only allows
231 The maximum number of simultaneous tagged commands queued to a device
239 accept more than 64 simultaneous commands. So, using more than 64 queued
416 Maximum number of queued commands (default answer: 32)
418 that can be queued to a device. The maximum supported value is 255.
436 - enable tagged commands, up to 4 tagged commands queued.
450 #tags will be truncated to the max queued commands configuration parameter.
scsi_mid_low_api.txt:373 scsi_block_requests - prevent further commands being queued to given host
384 scsi_unblock_requests - allow further commands to be queued to given host
475 * scsi_block_requests - prevent further commands being queued to given host
684 * scsi_unblock_requests - allow further commands to be queued to given host
847 * will then be queued on current host during eh.
867 * queued on current host during eh.
885 * queued on current host during eh.
903 * queued on current host during eh.
1215 cmd_per_lun - maximum number of commands that can be queued on devices
1256 commands become queued against the LLD than are indicated by
[all …]
ncr53c8xx.txt:256 maximum number of queued commands up to 32. The Symbios Setup only allows
259 The maximum number of simultaneous tagged commands queued to a device
268 more than 64 simultaneous commands. So, using more than 64 queued commands
577 that can be queued to a device. The maximum supported value is 32.
635 - enable tagged commands, up to 4 tagged commands queued.
700 #tags will be truncated to the max queued commands configuration parameter.
1121 Max number of commands that can be queued to a host.
1124 Max number of commands queued to a host for a device.
libsas.txt:314 Returns: -SAS_QUEUE_FULL, -ENOMEM, nothing was queued;
315 0, the task(s) were queued.
ChangeLog.ncr53c8xx:358 Count actual number of CCB queued to the controller (future use).
442 - Prepare CCBs for SCSI commands that cannot be queued, instead of
arcmsr_spec.txt:86 ** 0x03 : Reset (Abort all queued Command)
ChangeLog.sym53c8xx:19 queued to SCRIPTS. This is not always true, notably after a
ChangeLog.1992-1997:730 being queued to more than one host at the same time (used when
ChangeLog.lpfc:759 * If we rcv a plogi on a NPort queued up for discovery, clear the
/linux-4.1.27/drivers/isdn/pcbit/
drv.c:320 chan->queued = 0; in pcbit_block_timer()
348 if (chan->queued >= MAX_QUEUED) in pcbit_xmit()
353 chan->queued); in pcbit_xmit()
373 chan->queued++; in pcbit_xmit()
509 if (chan->queued == MAX_QUEUED) { in pcbit_l3_receive()
515 chan->queued--; in pcbit_l3_receive()
pcbit.h:28 unsigned char queued; /* unacked data messages */ member
layer2.c:468 if (chan->queued) { in pcbit_fake_conf()
469 chan->queued--; in pcbit_fake_conf()
/linux-4.1.27/drivers/tty/serial/
amba-pl011.c:143 bool queued; member
422 if (uap->dmatx.queued) in pl011_dma_tx_callback()
441 uap->dmatx.queued = false; in pl011_dma_tx_callback()
481 uap->dmatx.queued = false; in pl011_dma_tx_refill()
513 uap->dmatx.queued = false; in pl011_dma_tx_refill()
522 uap->dmatx.queued = false; in pl011_dma_tx_refill()
543 uap->dmatx.queued = true; in pl011_dma_tx_refill()
576 if (uap->dmatx.queued) { in pl011_dma_tx_irq()
602 if (uap->dmatx.queued) { in pl011_dma_tx_stop()
627 if (!uap->dmatx.queued) { in pl011_dma_tx_start()
[all …]
/linux-4.1.27/kernel/sched/
idle_task.c:54 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) in task_tick_idle() argument
core.c:1186 int running, queued; in wait_task_inactive() local
1224 queued = task_on_rq_queued(p); in wait_task_inactive()
1256 if (unlikely(queued)) { in wait_task_inactive()
3000 int oldprio, queued, running, enqueue_flag = 0; in rt_mutex_setprio() local
3029 queued = task_on_rq_queued(p); in rt_mutex_setprio()
3031 if (queued) in rt_mutex_setprio()
3073 if (queued) in rt_mutex_setprio()
3084 int old_prio, delta, queued; in set_user_nice() local
3105 queued = task_on_rq_queued(p); in set_user_nice()
3106 if (queued) in set_user_nice()
[all …]
stop_task.c:77 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) in task_tick_stop() argument
fair.c:3349 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) in entity_tick() argument
3368 if (queued) { in entity_tick()
7814 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) in task_tick_fair() argument
7821 entity_tick(cfs_rq, se, queued); in task_tick_fair()
7997 static void task_move_group_fair(struct task_struct *p, int queued) in task_move_group_fair() argument
8027 if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING)) in task_move_group_fair()
8028 queued = 1; in task_move_group_fair()
8030 if (!queued) in task_move_group_fair()
8034 if (!queued) { in task_move_group_fair()
deadline.c:1150 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) in task_tick_dl() argument
1159 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 && in task_tick_dl()
sched.h:1201 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
rt.c:2246 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) in task_tick_rt() argument
/linux-4.1.27/drivers/usb/dwc3/
gadget.h:75 req->queued = true; in dwc3_gadget_move_request_queued()
core.h:618 unsigned queued:1; member
gadget.c:240 if (req->queued) { in dwc3_gadget_giveback()
254 req->queued = false; in dwc3_gadget_giveback()
/linux-4.1.27/Documentation/block/
stat.txt:44 already-queued I/O request.
74 had I/O requests queued.
biodoc.txt:787 Returns 1 if the queue can accept a new queued command, 0 if we are
1036 multi-page bios being queued in one shot, we may not need to wait to merge
/linux-4.1.27/drivers/isdn/act2000/
act2000.h:122 short queued; /* User-Data Bytes in TX queue */ member
capi.c:486 chan->queued = 0; in actcapi_disconnect_b3_resp()
609 chan->queued -= m->msg.data_b3_req.datalen; in handle_ack()
613 if (chan->queued < 0) in handle_ack()
614 chan->queued = 0; in handle_ack()
module.c:432 if ((chan->queued + len) >= ACT2000_MAX_QUEUED) in act2000_sendbuf()
465 chan->queued += len; in act2000_sendbuf()
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
osc_io.c:113 int queued = 0; in osc_io_submit() local
173 if (++queued == max_pages) { in osc_io_submit()
174 queued = 0; in osc_io_submit()
182 if (queued > 0) in osc_io_submit()
/linux-4.1.27/net/lapb/
lapb_in.c:254 int queued = 0; in lapb_state3_machine() local
408 queued = 1; in lapb_state3_machine()
470 if (!queued) in lapb_state3_machine()
/linux-4.1.27/drivers/gpu/drm/omapdrm/
omap_crtc.c:334 apply->queued = false; in apply_worker()
366 if (apply->queued) in omap_crtc_apply()
369 apply->queued = true; in omap_crtc_apply()
omap_drv.h:63 bool queued; member
/linux-4.1.27/arch/x86/kernel/apic/
apic.c:1213 unsigned int value, queued; in setup_local_APIC() local
1283 queued = 0; in setup_local_APIC()
1285 queued |= apic_read(APIC_IRR + i*0x10); in setup_local_APIC()
1301 if (queued) { in setup_local_APIC()
1308 } while (queued && max_loops > 0); in setup_local_APIC()
/linux-4.1.27/drivers/mtd/
ftl.c:611 int queued, ret; in reclaim_block() local
618 queued = 0; in reclaim_block()
629 queued = 1; in reclaim_block()
649 if (queued) { in reclaim_block()
/linux-4.1.27/Documentation/filesystems/caching/
fscache.txt:246 ok=N Number of attr changed requests queued
284 enq=N Number of times async ops queued for processing
287 dfr=N Number of async ops queued for deferred release
398 S Show objects that have work queued
399 s Show objects that don't have work queued
object.txt:123 is not masked, the object will be queued for processing (by calling
201 operations the netfs has queued for an object. If creation failed, the
backend-api.txt:422 queued and 0 returned. When the read finishes, fscache_end_io() should be
431 queued and released when I/O on the page has been formally ended.
netfs-api.txt:869 that, operations will be queued again behind the invalidation operation.
/linux-4.1.27/fs/ocfs2/cluster/
heartbeat.c:695 int queued = 0; in o2hb_shutdown_slot() local
713 queued = 1; in o2hb_shutdown_slot()
718 if (queued) in o2hb_shutdown_slot()
779 int queued = 0; in o2hb_check_slot() local
873 queued = 1; in o2hb_check_slot()
925 queued = 1; in o2hb_check_slot()
941 if (queued) in o2hb_check_slot()
/linux-4.1.27/drivers/mailbox/
Kconfig:5 on-chip processors through queued messages and interrupt driven
/linux-4.1.27/net/sunrpc/
svc_xprt.c:328 bool queued = false; in svc_xprt_do_enqueue() local
363 if (!queued) { in svc_xprt_do_enqueue()
391 if (!queued) { in svc_xprt_do_enqueue()
392 queued = true; in svc_xprt_do_enqueue()
/linux-4.1.27/include/linux/
blk-mq.h:50 unsigned long queued; member
kvm_host.h:250 u32 queued; member
/linux-4.1.27/Documentation/sound/alsa/
timestamping.txt:20 the ring buffer and the amount of queued samples.
27 queued samples have been played out.
49 |< codec delay >|<--hw delay-->|<queued samples>|<---avail->|
/linux-4.1.27/Documentation/devicetree/bindings/soc/ti/
keystone-navigator-qmss.txt:9 management of the packet queues. Packets are queued/de-queued by writing or
/linux-4.1.27/net/netfilter/
nfnetlink_queue_core.c:640 unsigned int queued; in nfqnl_enqueue_packet() local
678 queued = 0; in nfqnl_enqueue_packet()
686 queued++; in nfqnl_enqueue_packet()
692 if (queued) { in nfqnl_enqueue_packet()
/linux-4.1.27/drivers/tty/hvc/
hvc_iucv.c:463 int queued; in hvc_iucv_put_chars() local
472 queued = hvc_iucv_queue(priv, buf, count); in hvc_iucv_put_chars()
475 return queued; in hvc_iucv_put_chars()
/linux-4.1.27/Documentation/filesystems/
gfs2-glocks.txt:16 The gl_holders list contains all the queued lock requests (not
20 are queued, except for those marked LM_FLAG_PRIORITY which are
215 queue - Number of glock requests queued (qcnt in glstats file)
dnotify.txt:34 (SIGRTMIN + <n>) so that the notifications may be queued. This is
fuse.txt:161 this flag is set, an INTERRUPT request is queued.
164 request is queued.
167 userspace filesystem will receive queued INTERRUPTs before any others.
proc.txt:241 SigQ number of signals queued/max. number for queue
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
debug.h:170 u32 queued; member
debug.c:598 PR("MPDUs Queued: ", queued); in read_file_xmit()
1266 AWDATA(queued); in ath9k_get_et_stats()
xmit.c:2103 TX_STAT_INC(txq->axq_qnum, queued); in ath_tx_send_normal()
2482 TX_STAT_INC(txctl.txq->axq_qnum, queued); in ath_tx_cabq()
/linux-4.1.27/drivers/crypto/caam/
DKconfig73 more descriptor completions are queued without reaching the count
/linux-4.1.27/drivers/media/usb/tm6000/
tm6000.h:94 struct list_head queued; member
tm6000-video.c:1376 "queued=%d\n", list_empty(&dev->vidq.queued)); in __tm6000_open()
1661 INIT_LIST_HEAD(&dev->vidq.queued); in tm6000_v4l2_register()
/linux-4.1.27/drivers/spi/
spi.c:1143 master->queued = true; in spi_master_initialize_queue()
1647 if (master->queued) { in spi_unregister_master()
1666 if (!master->queued) in spi_master_suspend()
1681 if (!master->queued) in spi_master_resume()
/linux-4.1.27/Documentation/sysctl/
net.txt:150 Maximum number of packets, queued on the INPUT side, when the interface
195 unix_dgram_qlen limits the max number of datagrams queued in Unix domain
kernel.txt:729 of POSIX realtime (queued) signals that can be outstanding
732 rtsig-nr shows the number of RT signals currently queued.
/linux-4.1.27/drivers/net/wireless/cw1200/
wsm.c:1591 int queued; in cw1200_get_prio_queue() local
1596 queued = cw1200_queue_get_num_queued(&priv->tx_queue[i], in cw1200_get_prio_queue()
1598 if (!queued) in cw1200_get_prio_queue()
1600 *total += queued; in cw1200_get_prio_queue()
/linux-4.1.27/include/linux/spi/
spi.h:416 bool queued; member
/linux-4.1.27/drivers/block/
xen-blkfront.c:614 int queued; in do_blkif_request() local
618 queued = 0; in do_blkif_request()
647 queued++; in do_blkif_request()
650 if (queued != 0) in do_blkif_request()
/linux-4.1.27/net/mac80211/
tx.c:1065 bool queued = false; in ieee80211_tx_prep_agg() local
1105 queued = true; in ieee80211_tx_prep_agg()
1123 return queued; in ieee80211_tx_prep_agg()
1185 bool queued; in ieee80211_tx_prepare() local
1187 queued = ieee80211_tx_prep_agg(tx, skb, info, in ieee80211_tx_prepare()
1190 if (unlikely(queued)) in ieee80211_tx_prepare()
cfg.c:2525 bool queued = false; in ieee80211_start_roc_work() local
2616 queued = true; in ieee80211_start_roc_work()
2632 queued = true; in ieee80211_start_roc_work()
2637 queued = true; in ieee80211_start_roc_work()
2650 queued = true; in ieee80211_start_roc_work()
2666 if (!queued) in ieee80211_start_roc_work()
/linux-4.1.27/Documentation/s390/
CommonIO:94 A write request to this file is blocked until all queued cio actions are
/linux-4.1.27/Documentation/devicetree/bindings/mailbox/
omap-mailbox.txt:5 using a queued mailbox interrupt mechanism. The IP block is external to the
/linux-4.1.27/drivers/net/wireless/ath/carl9170/
tx.c:1429 goto queued; in carl9170_tx_ampdu_queue()
1437 goto queued; in carl9170_tx_ampdu_queue()
1442 queued: in carl9170_tx_ampdu_queue()
/linux-4.1.27/Documentation/RCU/
rcubarrier.txt:76 synchronize_rcu(), in particular, if there are no RCU callbacks queued
199 that RCU callbacks are never reordered once queued on one of the per-CPU
trace.txt:143 "N" Indicates that there are callbacks queued that are not
148 "R" Indicates that there are callbacks queued that are
151 "W" Indicates that there are callbacks queued that are
154 "D" Indicates that there are callbacks queued that have
/linux-4.1.27/Documentation/input/
atarikbd.txt:133 scanning the joysticks (samples are not queued).
143 are not queued).
367 joystick events are also queued.
372 causes any accumulated motion to be immediately queued as packets, if the
/linux-4.1.27/drivers/usb/serial/
io_ti.c:1666 int queued; in edge_tty_recv() local
1668 queued = tty_insert_flip_string(&port->port, data, length); in edge_tty_recv()
1669 if (queued < length) in edge_tty_recv()
1671 __func__, length - queued); in edge_tty_recv()
/linux-4.1.27/drivers/scsi/
Kconfig:697 int "maximum number of queued commands"
701 This specifies how many SCSI commands can be maximally queued for
1073 int "Maximum number of queued commands"
1078 that can be queued to any device, when tagged command queuing is
1079 possible. The driver supports up to 256 queued commands per device.
1168 int "maximum number of queued commands"
1173 that can be queued to any device, when tagged command queuing is
1416 int "maximum number of queued commands"
1420 This specifies how many SCSI commands can be maximally queued for
ncr53c8xx.c:1580 u_char queued; member
4466 cp->queued = 1; in ncr_put_start_queue()
5102 if (cp->queued) { in ncr_ccb_skipped()
5106 if (cp->queued) { in ncr_ccb_skipped()
5108 cp->queued = 0; in ncr_ccb_skipped()
7291 if (cp->queued) { in ncr_free_ccb()
7297 if (cp->queued) { in ncr_free_ccb()
7299 cp->queued = 0; in ncr_free_ccb()
/linux-4.1.27/Documentation/cris/
README:106 block: queued sectors max/low 9109kB/3036kB, 64 slots per queue
/linux-4.1.27/Documentation/hid/
hidraw.txt:41 read() will read a queued report received from the HID device. On USB
uhid.txt:120 read() will return a queued output report. No reaction is required to any of
/linux-4.1.27/Documentation/power/
runtime_pm.txt:246 - if set, there's a pending request (i.e. a work item queued up into pm_wq)
340 success or error code if the request has not been queued up
345 expired then the work item is queued up immediately
351 item is queued up immediately); returns 0 on success, 1 if the device's PM
353 hasn't been scheduled (or queued up if 'delay' is 0); if the execution of
361 error code if the request hasn't been queued up
pci.txt:1002 queued by the PM core (for example, after processing a request to resume a
/linux-4.1.27/Documentation/nfc/
nfc-hci.txt:187 the current llc. In case of shdlc, the frame is queued in shdlc rx queue.
250 queued to HCI rx_queue and will be dispatched from HCI rx worker
/linux-4.1.27/Documentation/locking/
mutex-design.txt:60 soon. The mutex spinners are queued up using MCS lock so that only
ww-mutex-design.txt:23 evict other buffers which are already queued up to the GPU), but for a
/linux-4.1.27/drivers/usb/mon/
mon_bin.c:127 u32 queued; member
1107 if (put_user(nevents, &sp->queued)) in mon_bin_ioctl()
/linux-4.1.27/drivers/media/v4l2-core/
Dvideobuf2-core.c2767 unsigned int queued:1; member
2910 fileio->bufs[i].queued = 1; in __vb2_init_fileio()
3030 buf->queued = 0; in __vb2_perform_fileio()
3108 buf->queued = 1; in __vb2_perform_fileio()
/linux-4.1.27/Documentation/dmaengine/
client.txt:158 queue is started and subsequent ones queued up.
provider.txt:306 currently queued transfers, but only on subsequent ones
376 that can be queued to buffers before being flushed to
/linux-4.1.27/Documentation/cgroups/
blkio-controller.txt:245 - Total number of requests queued up at any given instant for this
258 (i.e., went from 0 to 1 request queued) to get a timeslice for one of
memory.txt:496 writeback - # of bytes of file/anon cache that are queued for syncing to
/linux-4.1.27/drivers/media/usb/cx231xx/
cx231xx-cards.c:1329 INIT_LIST_HEAD(&dev->video_mode.vidq.queued); in cx231xx_init_dev()
1333 INIT_LIST_HEAD(&dev->vbi_mode.vidq.queued); in cx231xx_init_dev()
cx231xx.h:248 struct list_head queued; member
/linux-4.1.27/Documentation/crypto/
async-tx-api.txt:77 has been queued to execute asynchronously. Descriptors are recycled
/linux-4.1.27/net/ipv4/
tcp_input.c:5703 int queued = 0; in tcp_rcv_state_process() local
5749 queued = tcp_rcv_synsent_state_process(sk, skb, th, len); in tcp_rcv_state_process()
5750 if (queued >= 0) in tcp_rcv_state_process()
5751 return queued; in tcp_rcv_state_process()
5949 queued = 1; in tcp_rcv_state_process()
5959 if (!queued) { in tcp_rcv_state_process()
/linux-4.1.27/drivers/scsi/aic7xxx/
aic7xxx.reg:1030 * Input queue for queued SCBs (commands that the seqencer has yet to start)
1041 * Number of queued SCBs
1075 * Number of queued SCBs in the Out FIFO
aic79xx.seq:361 * target queue is empty, the SCB can be queued
366 * could allow commands to be queued out of order.
369 * SCB can be queued to the waiting for selection
382 * it can be queued. Since the SCB_SCSIID of the
384 * queued SCB, there is no need to restore the SCBID
aic79xx.reg:4048 * queued to the controller, it cannot enter
4050 * selections for any previously queued
4052 * the wait, the MK_MESSAGE SCB is queued
aic7xxx.seq:64 * command for which a second SCB has been queued. The sequencer will
104 * We have at least one queued SCB now and we don't have any
/linux-4.1.27/drivers/atm/
eni.c:152 static int tx_complete = 0,dma_complete = 0,queued = 0,requeued = 0, variable
1177 queued++; in do_tx()
1464 tx_complete,dma_complete,queued,requeued,submitted,backlogged,
/linux-4.1.27/Documentation/timers/
hrtimers.txt:97 queued timers, without having to walk the rbtree.
/linux-4.1.27/Documentation/scheduler/
sched-design-CFS.txt:65 rq->cfs.load value, which is the sum of the weights of the tasks queued on the
/linux-4.1.27/drivers/block/drbd/
drbd_worker.c:627 int queued = sk->sk_wmem_queued; in make_resync_request() local
629 if (queued > sndbuf / 2) { in make_resync_request()
/linux-4.1.27/Documentation/ide/
ChangeLog.ide-tape.1995-2002:36 * from the other device can be queued and ide.c will
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/
cfg80211.c:3186 bool more_data, queued; in ath6kl_mgmt_tx() local
3224 queued = ath6kl_mgmt_powersave_ap(vif, id, freq, wait, buf, len, in ath6kl_mgmt_tx()
3226 if (queued) in ath6kl_mgmt_tx()
/linux-4.1.27/Documentation/spi/
spi-summary:569 method is not used on queued controllers and must be NULL if
577 SPI subsystem, just implement the queued methods specified above. Using
/linux-4.1.27/Documentation/m68k/
kernel-options.txt:584 This is the maximum number of SCSI commands queued internally to the
/linux-4.1.27/init/
Kconfig:644 they have RCU callbacks queued, and prevents RCU from waking
/linux-4.1.27/
MAINTAINERS:616 P: Andres Salomon <dilinger@queued.net>