qlen 899 crypto/algapi.c queue->qlen = 0;
qlen 909 crypto/algapi.c if (unlikely(queue->qlen >= queue->max_qlen)) {
qlen 919 crypto/algapi.c queue->qlen++;
qlen 931 crypto/algapi.c if (unlikely(!queue->qlen))
qlen 934 crypto/algapi.c queue->qlen--;
qlen 120 crypto/cryptd.c BUG_ON(cpu_queue->queue.qlen);
qlen 183 crypto/cryptd.c if (cpu_queue->queue.qlen)
qlen 303 drivers/bluetooth/hci_bcsp.c if (bcsp->unack.qlen < BCSP_TXWINSIZE) {
qlen 696 drivers/bluetooth/hci_bcsp.c BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen);
qlen 161 drivers/bluetooth/hci_h5.c BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);
qlen 738 drivers/bluetooth/hci_h5.c if (h5->unack.qlen >= h5->tx_win)
qlen 39 drivers/crypto/cavium/cpt/cptpf.h u32 qlen;
qlen 117 drivers/crypto/cavium/cpt/cptpf_mbox.c vfx->qlen = mbx.data;
qlen 118 drivers/crypto/cavium/cpt/cptpf_mbox.c cpt_cfg_qlen_for_vf(cpt, vf, vfx->qlen);
qlen 86 drivers/crypto/cavium/cpt/cptvf.h u32 qlen; /* Queue length */
qlen 99 drivers/crypto/cavium/cpt/cptvf_main.c pqinfo->qlen = 0;
qlen 103 drivers/crypto/cavium/cpt/cptvf_main.c static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
qlen 112 drivers/crypto/cavium/cpt/cptvf_main.c pqinfo->qlen = qlen;
qlen 114 drivers/crypto/cavium/cpt/cptvf_main.c size = (qlen * sizeof(struct pending_entry));
qlen 139 drivers/crypto/cavium/cpt/cptvf_main.c static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
qlen 147 drivers/crypto/cavium/cpt/cptvf_main.c ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
qlen 205 drivers/crypto/cavium/cpt/cptvf_main.c u32 qlen)
qlen 215 drivers/crypto/cavium/cpt/cptvf_main.c cptvf->qsize = min(qlen, cqinfo->qchunksize) *
qlen 218 drivers/crypto/cavium/cpt/cptvf_main.c q_size = qlen * cqinfo->cmd_size;
qlen 279 drivers/crypto/cavium/cpt/cptvf_main.c static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
qlen 286 drivers/crypto/cavium/cpt/cptvf_main.c qlen);
qlen 315 drivers/crypto/cavium/cpt/cptvf_main.c static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
qlen 326 drivers/crypto/cavium/cpt/cptvf_main.c ret = init_command_queues(cptvf, qlen);
qlen 333 drivers/crypto/cavium/cpt/cptvf_main.c ret = init_pending_queues(cptvf, qlen, nr_queues);
qlen 15 drivers/crypto/cavium/cpt/cptvf_reqmanager.c int qlen)
qlen 26 drivers/crypto/cavium/cpt/cptvf_reqmanager.c if (unlikely(q->rear == qlen))
qlen 39 drivers/crypto/cavium/cpt/cptvf_reqmanager.c if (unlikely(queue->front == pqinfo->qlen))
qlen 491 drivers/crypto/cavium/cpt/cptvf_reqmanager.c pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen);
qlen 234 drivers/crypto/cavium/nitrox/nitrox_dev.h u16 qlen;
qlen 142 drivers/crypto/cavium/nitrox/nitrox_hal.c pkt_in_rsize.s.rsize = ndev->qlen;
qlen 384 drivers/crypto/cavium/nitrox/nitrox_hal.c qsize.host_queue_size = ndev->qlen;
qlen 29 drivers/crypto/cavium/nitrox/nitrox_lib.c cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
qlen 48 drivers/crypto/cavium/nitrox/nitrox_main.c static unsigned int qlen = DEFAULT_CMD_QLEN;
qlen 49 drivers/crypto/cavium/nitrox/nitrox_main.c module_param(qlen, uint, 0644);
qlen 50 drivers/crypto/cavium/nitrox/nitrox_main.c MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
qlen 487 drivers/crypto/cavium/nitrox/nitrox_main.c ndev->qlen = qlen;
qlen 264 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
qlen 266 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c if (atomic_inc_return(&cmdq->pending_count) > qlen) {
qlen 307 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
qlen 328 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
qlen 354 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
qlen 768 drivers/crypto/chelsio/chtls/chtls_io.c int qlen = skb_queue_len(&csk->txq);
qlen 770 drivers/crypto/chelsio/chtls/chtls_io.c if (likely(qlen)) {
qlen 783 drivers/crypto/chelsio/chtls/chtls_io.c if (qlen == 1 &&
qlen 1821 drivers/crypto/hifn_795x.c dev->success, dev->queue.qlen, dev->queue.max_qlen,
qlen 2077 drivers/crypto/hifn_795x.c if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
qlen 2457 drivers/crypto/hifn_795x.c if (dev->started < HIFN_QUEUE_LENGTH && dev->queue.qlen)
qlen 1148 drivers/input/misc/wistron_btns.c u8 qlen;
qlen 1152 drivers/input/misc/wistron_btns.c qlen = CMOS_READ(cmos_address);
qlen 1153 drivers/input/misc/wistron_btns.c if (qlen == 0)
qlen 39 drivers/md/dm-queue-length.c atomic_t qlen; /* the number of in-flight I/Os */
qlen 100 drivers/md/dm-queue-length.c DMEMIT("%d ", atomic_read(&pi->qlen));
qlen 149 drivers/md/dm-queue-length.c atomic_set(&pi->qlen, 0);
qlen 200 drivers/md/dm-queue-length.c (atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
qlen 203 drivers/md/dm-queue-length.c if (!atomic_read(&best->qlen))
qlen 224 drivers/md/dm-queue-length.c atomic_inc(&pi->qlen);
qlen 234 drivers/md/dm-queue-length.c atomic_dec(&pi->qlen);
qlen 52 drivers/misc/hpilo.c int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
qlen 53 drivers/misc/hpilo.c return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
qlen 107 drivers/net/caif/caif_hsi.c if (cfhsi->qhead[i].qlen)
qlen 112 drivers/net/caif/caif_hsi.c if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
qlen 258 drivers/net/caif/caif_serial.c if (ser->head.qlen <= SEND_QUEUE_LOW &&
qlen 277 drivers/net/caif/caif_serial.c if (ser->head.qlen > SEND_QUEUE_HIGH &&
qlen 445 drivers/net/caif/caif_spi.c if (cfspi->flow_off_sent && cfspi->qhead.qlen < cfspi->qd_low_mark &&
qlen 511 drivers/net/caif/caif_spi.c cfspi->qhead.qlen > cfspi->qd_high_mark &&
qlen 65 drivers/net/ethernet/alacritech/slicoss.c static inline int slic_next_queue_idx(unsigned int idx, unsigned int qlen)
qlen 67 drivers/net/ethernet/alacritech/slicoss.c return (idx + 1) & (qlen - 1);
qlen 72 drivers/net/ethernet/alacritech/slicoss.c unsigned int qlen)
qlen 75 drivers/net/ethernet/alacritech/slicoss.c return (qlen - (put_idx - done_idx) - 1);
qlen 2380 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->sendq.qlen == 1)
qlen 131 drivers/net/ethernet/chelsio/cxgb4/srq.c e->qlen = SRQT_QLEN_G(be32_to_cpu(rpl->qlen_qbase));
qlen 46 drivers/net/ethernet/chelsio/cxgb4/srq.h u8 qlen;
qlen 685 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
qlen 703 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
qlen 23 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h u16 qlen;
qlen 63 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h u16 qlen;
qlen 3159 drivers/net/ethernet/intel/i40e/i40e_main.c tx_ctx.qlen = ring->count;
qlen 3297 drivers/net/ethernet/intel/i40e/i40e_main.c rx_ctx.qlen = ring->count;
qlen 571 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c tx_ctx.qlen = info->ring_len;
qlen 637 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c rx_ctx.qlen = info->ring_len;
qlen 1113 drivers/net/ethernet/intel/ice/ice_common.c ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
qlen 1180 drivers/net/ethernet/intel/ice/ice_common.c ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
qlen 275 drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h u16 qlen;
qlen 442 drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h u32 qlen; /* bigger than needed, see above for reason */
qlen 32 drivers/net/ethernet/intel/ice/ice_lib.c rlan_ctx.qlen = ring->count;
qlen 126 drivers/net/ethernet/intel/ice/ice_lib.c tlan_ctx->qlen = ring->count;
qlen 466 drivers/net/ethernet/netronome/nfp/abm/qdisc.c qstats->qlen += new->backlog_pkts - old->backlog_pkts;
qlen 524 drivers/net/ethernet/netronome/nfp/ccm_mbox.c if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
qlen 3681 drivers/net/ethernet/sun/niu.c int qlen, rcr_done = 0, work_done = 0;
qlen 3687 drivers/net/ethernet/sun/niu.c qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
qlen 3690 drivers/net/ethernet/sun/niu.c qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
qlen 3697 drivers/net/ethernet/sun/niu.c __func__, rp->rx_channel, (unsigned long long)stat, qlen);
qlen 3700 drivers/net/ethernet/sun/niu.c qlen = min(qlen, budget);
qlen 3701 drivers/net/ethernet/sun/niu.c while (work_done < qlen) {
qlen 3721 drivers/net/ethernet/sun/niu.c if (qlen > 10)
qlen 1782 drivers/net/ethernet/toshiba/tc35815.c int qlen = (lp->tfd_start + TX_FD_NUM
qlen 1792 drivers/net/ethernet/toshiba/tc35815.c if (lp->lstats.max_tx_qlen < qlen)
qlen 1793 drivers/net/ethernet/toshiba/tc35815.c lp->lstats.max_tx_qlen = qlen;
qlen 1599 drivers/net/ppp/ppp_generic.c if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
qlen 2030 drivers/net/ppp/ppp_generic.c while (pch->file.rq.qlen > PPP_MAX_RQLEN &&
qlen 2183 drivers/net/ppp/ppp_generic.c while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
qlen 442 drivers/net/usb/usbnet.c if (dev->done.qlen == 1)
qlen 1465 drivers/net/usb/usbnet.c if (dev->txq.qlen >= TX_QLEN (dev))
qlen 1498 drivers/net/usb/usbnet.c for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
qlen 1549 drivers/net/usb/usbnet.c if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
qlen 1559 drivers/net/usb/usbnet.c int temp = dev->rxq.qlen;
qlen 1564 drivers/net/usb/usbnet.c if (temp != dev->rxq.qlen)
qlen 1567 drivers/net/usb/usbnet.c temp, dev->rxq.qlen);
qlen 1568 drivers/net/usb/usbnet.c if (dev->rxq.qlen < RX_QLEN(dev))
qlen 1571 drivers/net/usb/usbnet.c if (dev->txq.qlen < TX_QLEN (dev))
qlen 1855 drivers/net/usb/usbnet.c if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
qlen 1921 drivers/net/usb/usbnet.c if (!(dev->txq.qlen >= TX_QLEN(dev)))
qlen 394 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c if (!pktlist->qlen)
qlen 426 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c target_list->qlen);
qlen 568 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c addr, pktq->qlen);
qlen 577 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c if (pktq->qlen == 1)
qlen 639 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
qlen 648 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c if (pktq->qlen == 1 || !sdiodev->sg_support) {
qlen 1368 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c int qlen;
qlen 1381 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
qlen 1382 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
qlen 1383 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c ((qlen) && (atomic_read(&commonring->outstanding_tx) <
qlen 1674 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c bus->glom.qlen, pfirst, pfirst->data,
qlen 2183 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (bus->txglom && pktq->qlen > 1) {
qlen 2285 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP;
qlen 213 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c len += pq->q[prec].skblist.qlen;
qlen 78 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->q[prec].skblist.qlen;
qlen 83 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->q[prec].max - pq->q[prec].skblist.qlen;
qlen 88 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->q[prec].skblist.qlen >= pq->q[prec].max;
qlen 601 drivers/net/wireless/realtek/rtlwifi/usb.c unsigned int qlen;
qlen 611 drivers/net/wireless/realtek/rtlwifi/usb.c qlen = skb_queue_len(&rtlusb->rx_queue);
qlen 612 drivers/net/wireless/realtek/rtlwifi/usb.c if (qlen >= __RX_SKB_MAX_QUEUED) {
qlen 614 drivers/net/wireless/realtek/rtlwifi/usb.c qlen);
qlen 94 drivers/nfc/st-nci/ndlc.c if (ndlc->send_q.qlen)
qlen 96 drivers/nfc/st-nci/ndlc.c ndlc->send_q.qlen, ndlc->ack_pending_q.qlen);
qlen 98 drivers/nfc/st-nci/ndlc.c while (ndlc->send_q.qlen) {
qlen 153 drivers/nfc/st-nci/ndlc.c if (ndlc->rcv_q.qlen)
qlen 154 drivers/nfc/st-nci/ndlc.c pr_debug("rcvQlen=%d\n", ndlc->rcv_q.qlen);
qlen 407 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (port->fcoe_pending_queue.qlen)
qlen 473 drivers/scsi/bnx2fc/bnx2fc_fcoe.c if (bg->fcoe_rx_list.qlen == 1)
qlen 552 drivers/scsi/fcoe/fcoe.c if (port->fcoe_pending_queue.qlen)
qlen 409 drivers/scsi/fcoe/fcoe_transport.c while (port->fcoe_pending_queue.qlen) {
qlen 411 drivers/scsi/fcoe/fcoe_transport.c port->fcoe_pending_queue.qlen++;
qlen 421 drivers/scsi/fcoe/fcoe_transport.c port->fcoe_pending_queue.qlen--;
qlen 425 drivers/scsi/fcoe/fcoe_transport.c port->fcoe_pending_queue.qlen--;
qlen 428 drivers/scsi/fcoe/fcoe_transport.c if (port->fcoe_pending_queue.qlen < port->min_queue_depth)
qlen 430 drivers/scsi/fcoe/fcoe_transport.c if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
qlen 434 drivers/scsi/fcoe/fcoe_transport.c if (port->fcoe_pending_queue.qlen > port->max_queue_depth)
qlen 453 drivers/tty/serial/jsm/jsm_cls.c int qlen;
qlen 478 drivers/tty/serial/jsm/jsm_cls.c qlen = uart_circ_chars_pending(circ);
qlen 481 drivers/tty/serial/jsm/jsm_cls.c n = min(n, qlen);
qlen 480 drivers/tty/serial/jsm/jsm_neo.c int qlen;
qlen 526 drivers/tty/serial/jsm/jsm_neo.c qlen = uart_circ_chars_pending(circ);
qlen 529 drivers/tty/serial/jsm/jsm_neo.c n = min(n, qlen);
qlen 299 drivers/tty/serial/men_z135_uart.c int qlen;
qlen 316 drivers/tty/serial/men_z135_uart.c qlen = uart_circ_chars_pending(xmit);
qlen 317 drivers/tty/serial/men_z135_uart.c if (qlen <= 0)
qlen 331 drivers/tty/serial/men_z135_uart.c txfree, qlen);
qlen 338 drivers/tty/serial/men_z135_uart.c if (align && qlen >= 3 && BYTES_TO_ALIGN(wptr))
qlen 340 drivers/tty/serial/men_z135_uart.c else if (qlen > txfree)
qlen 343 drivers/tty/serial/men_z135_uart.c n = qlen;
qlen 52 drivers/usb/gadget/function/f_hid.c unsigned int qlen;
qlen 690 drivers/usb/gadget/function/f_hid.c for (i = 0; i < hidg->qlen && status == 0; i++) {
qlen 1127 drivers/usb/gadget/function/f_hid.c hidg->qlen = 4;
qlen 34 drivers/usb/gadget/function/f_loopback.c unsigned qlen;
qlen 323 drivers/usb/gadget/function/f_loopback.c for (i = 0; i < loop->qlen && result == 0; i++) {
qlen 441 drivers/usb/gadget/function/f_loopback.c loop->qlen = lb_opts->qlen;
qlen 442 drivers/usb/gadget/function/f_loopback.c if (!loop->qlen)
qlen 443 drivers/usb/gadget/function/f_loopback.c loop->qlen = 32;
qlen 479 drivers/usb/gadget/function/f_loopback.c result = sprintf(page, "%d\n", opts->qlen);
qlen 502 drivers/usb/gadget/function/f_loopback.c opts->qlen = num;
qlen 509 drivers/usb/gadget/function/f_loopback.c CONFIGFS_ATTR(f_lb_opts_, qlen);
qlen 579 drivers/usb/gadget/function/f_loopback.c lb_opts->qlen = GZERO_QLEN;
qlen 95 drivers/usb/gadget/function/f_midi.c unsigned int buflen, qlen;
qlen 396 drivers/usb/gadget/function/f_midi.c for (i = 0; i < midi->qlen && err == 0; i++) {
qlen 1131 drivers/usb/gadget/function/f_midi.c F_MIDI_OPT(qlen, false, 0);
qlen 1232 drivers/usb/gadget/function/f_midi.c opts->qlen = 32;
qlen 1324 drivers/usb/gadget/function/f_midi.c midi->qlen = opts->qlen;
qlen 1328 drivers/usb/gadget/function/f_midi.c status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL);
qlen 581 drivers/usb/gadget/function/f_sourcesink.c int i, size, qlen, status = 0;
qlen 599 drivers/usb/gadget/function/f_sourcesink.c qlen = ss->iso_qlen;
qlen 602 drivers/usb/gadget/function/f_sourcesink.c qlen = ss->bulk_qlen;
qlen 606 drivers/usb/gadget/function/f_sourcesink.c for (i = 0; i < qlen; i++) {
qlen 24 drivers/usb/gadget/function/g_zero.h unsigned qlen;
qlen 53 drivers/usb/gadget/function/g_zero.h unsigned qlen;
qlen 1073 drivers/usb/gadget/function/u_ether.c result = alloc_requests(dev, link, qlen(dev->gadget,
qlen 1079 drivers/usb/gadget/function/u_ether.c DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));
qlen 26 drivers/usb/gadget/function/u_midi.h unsigned int qlen;
qlen 52 drivers/usb/gadget/legacy/gmidi.c static unsigned int qlen = 32;
qlen 53 drivers/usb/gadget/legacy/gmidi.c module_param(qlen, uint, S_IRUGO);
qlen 54 drivers/usb/gadget/legacy/gmidi.c MODULE_PARM_DESC(qlen, "USB read and write request queue length");
qlen 156 drivers/usb/gadget/legacy/gmidi.c midi_opts->qlen = qlen;
qlen 52 drivers/usb/gadget/legacy/printer.c static unsigned qlen = 10;
qlen 53 drivers/usb/gadget/legacy/printer.c module_param(qlen, uint, S_IRUGO|S_IWUSR);
qlen 55 drivers/usb/gadget/legacy/printer.c #define QLEN qlen
qlen 66 drivers/usb/gadget/legacy/zero.c .qlen = GZERO_QLEN,
qlen 254 drivers/usb/gadget/legacy/zero.c module_param_named(qlen, gzero_options.qlen, uint, S_IRUGO|S_IWUSR);
qlen 255 drivers/usb/gadget/legacy/zero.c MODULE_PARM_DESC(qlen, "depth of loopback queue");
qlen 313 drivers/usb/gadget/legacy/zero.c lb_opts->qlen = gzero_options.qlen;
qlen 533 drivers/usb/host/ohci-dbg.c unsigned qlen = 0;
qlen 537 drivers/usb/host/ohci-dbg.c qlen++;
qlen 547 drivers/usb/host/ohci-dbg.c qlen,
qlen 145 fs/adfs/dir.c static int __adfs_compare(const unsigned char *qstr, u32 qlen,
qlen 150 fs/adfs/dir.c if (qlen != len)
qlen 153 fs/adfs/dir.c for (i = 0; i < qlen; i++)
qlen 79 include/crypto/algapi.h unsigned int qlen;
qlen 194 include/crypto/algapi.h return queue->qlen;
qlen 294 include/linux/skbuff.h __u32 qlen;
qlen 1816 include/linux/skbuff.h return list_->qlen;
qlen 1832 include/linux/skbuff.h list->qlen = 0;
qlen 1873 include/linux/skbuff.h list->qlen++;
qlen 1900 include/linux/skbuff.h head->qlen += list->qlen;
qlen 1916 include/linux/skbuff.h head->qlen += list->qlen;
qlen 1931 include/linux/skbuff.h head->qlen += list->qlen;
qlen 1948 include/linux/skbuff.h head->qlen += list->qlen;
qlen 2024 include/linux/skbuff.h list->qlen--;
qlen 200 include/linux/sunrpc/sched.h unsigned short qlen; /* total # tasks waiting in queue */
qlen 55 include/net/gen_stats.h struct gnet_stats_queue *q, __u32 qlen);
qlen 58 include/net/gen_stats.h const struct gnet_stats_queue *q, __u32 qlen);
qlen 153 include/net/request_sock.h int qlen; /* # of pending (TCP_SYN_RECV) reqs */
qlen 171 include/net/request_sock.h atomic_t qlen;
qlen 213 include/net/request_sock.h atomic_dec(&queue->qlen);
qlen 219 include/net/request_sock.h atomic_inc(&queue->qlen);
qlen 224 include/net/request_sock.h return atomic_read(&queue->qlen);
qlen 53 include/net/sch_generic.h __u32 qlen;
qlen 153 include/net/sch_generic.h return !READ_ONCE(qdisc->q.qlen);
qlen 468 include/net/sch_generic.h return this_cpu_ptr(q->cpu_qstats)->qlen;
qlen 473 include/net/sch_generic.h return q->q.qlen;
qlen 478 include/net/sch_generic.h __u32 qlen = q->qstats.qlen;
qlen 483 include/net/sch_generic.h qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
qlen 485 include/net/sch_generic.h qlen += q->q.qlen;
qlen 488 include/net/sch_generic.h return qlen;
qlen 868 include/net/sch_generic.h this_cpu_inc(sch->cpu_qstats->qlen);
qlen 873 include/net/sch_generic.h this_cpu_dec(sch->cpu_qstats->qlen);
qlen 913 include/net/sch_generic.h __u32 qlen = qdisc_qlen_sum(sch);
qlen 915 include/net/sch_generic.h return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
qlen 918 include/net/sch_generic.h static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
qlen 925 include/net/sch_generic.h *qlen = qstats.qlen;
qlen 931 include/net/sch_generic.h __u32 qlen, backlog;
qlen 933 include/net/sch_generic.h qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
qlen 934 include/net/sch_generic.h qdisc_tree_reduce_backlog(sch, qlen, backlog);
qlen 939 include/net/sch_generic.h __u32 qlen, backlog;
qlen 941 include/net/sch_generic.h qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
qlen 943 include/net/sch_generic.h qdisc_tree_reduce_backlog(sch, qlen, backlog);
qlen 950 include/net/sch_generic.h qh->qlen = 0;
qlen 966 include/net/sch_generic.h qh->qlen++;
qlen 984 include/net/sch_generic.h qh->qlen++;
qlen 993 include/net/sch_generic.h qh->qlen--;
qlen 1076 include/net/sch_generic.h sch->q.qlen++;
qlen 1093 include/net/sch_generic.h sch->q.qlen--;
qlen 1105 include/net/sch_generic.h sch->q.qlen++;
qlen 1121 include/net/sch_generic.h sch->q.qlen--;
qlen 1137 include/net/sch_generic.h if (qh->qlen) {
qlen 1142 include/net/sch_generic.h qh->qlen = 0;
qlen 18 include/sound/seq_midi_event.h int qlen; /* queue length */
qlen 478 include/trace/events/rcu.h long qlen),
qlen 480 include/trace/events/rcu.h TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
qlen 487 include/trace/events/rcu.h __field(long, qlen)
qlen 495 include/trace/events/rcu.h __entry->qlen = qlen;
qlen 500 include/trace/events/rcu.h __entry->qlen_lazy, __entry->qlen)
qlen 514 include/trace/events/rcu.h long qlen_lazy, long qlen),
qlen 516 include/trace/events/rcu.h TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
qlen 523 include/trace/events/rcu.h __field(long, qlen)
qlen 531 include/trace/events/rcu.h __entry->qlen = qlen;
qlen 536 include/trace/events/rcu.h __entry->qlen_lazy, __entry->qlen)
qlen 548 include/trace/events/rcu.h TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
qlen 550 include/trace/events/rcu.h TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
qlen 555 include/trace/events/rcu.h __field(long, qlen)
qlen 562 include/trace/events/rcu.h __entry->qlen = qlen;
qlen 567 include/trace/events/rcu.h __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
qlen 63 include/uapi/linux/gen_stats.h __u32 qlen;
qlen 92 include/uapi/linux/i2o-dev.h unsigned int qlen; /* Length in bytes of query string buffer */
qlen 42 include/uapi/linux/pkt_sched.h __u32 qlen;
qlen 367 net/atm/clip.c if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
qlen 287 net/atm/lec.c if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {
qlen 168 net/caif/caif_dev.c int err, high = 0, qlen = 0;
qlen 201 net/caif/caif_dev.c if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
qlen 221 net/caif/caif_dev.c qlen, high);
qlen 4118 net/core/dev.c static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
qlen 4125 net/core/dev.c if (qlen < (netdev_max_backlog >> 1))
qlen 4163 net/core/dev.c unsigned int qlen;
qlen 4172 net/core/dev.c qlen = skb_queue_len(&sd->input_pkt_queue);
qlen 4173 net/core/dev.c if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
qlen 4174 net/core/dev.c if (qlen) {
qlen 290 net/core/gen_stats.c qstats->qlen = 0;
qlen 301 net/core/gen_stats.c __u32 qlen)
qlen 306 net/core/gen_stats.c qstats->qlen = q->qlen;
qlen 313 net/core/gen_stats.c qstats->qlen = qlen;
qlen 334 net/core/gen_stats.c struct gnet_stats_queue *q, __u32 qlen)
qlen 338 net/core/gen_stats.c __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);
qlen 342 net/core/gen_stats.c d->tc_stats.qlen = qstats.qlen;
qlen 1573 net/core/neighbour.c if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
qlen 2061 net/core/neighbour.c .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
qlen 231 net/core/netpoll.c while (skb_pool.qlen < MAX_SKBS) {
qlen 41 net/core/request_sock.c queue->fastopenq.qlen = 0;
qlen 101 net/core/request_sock.c fastopenq->qlen--;
qlen 129 net/core/request_sock.c fastopenq->qlen++;
qlen 181 net/dccp/input.c if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
qlen 23 net/dccp/qpolicy.c sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen;
qlen 717 net/ipv4/inet_connection_sock.c int qlen, expire = 0, resend = 0;
qlen 743 net/ipv4/inet_connection_sock.c qlen = reqsk_queue_len(queue);
qlen 744 net/ipv4/inet_connection_sock.c if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
qlen 748 net/ipv4/inet_connection_sock.c if (qlen < young)
qlen 1174 net/ipv4/ipmr.c if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
qlen 247 net/ipv4/tcp_fastopen.c queue->fastopenq.qlen++;
qlen 307 net/ipv4/tcp_fastopen.c if (fastopenq->qlen >= fastopenq->max_qlen) {
qlen 318 net/ipv4/tcp_fastopen.c fastopenq->qlen--;
qlen 1187 net/ipv6/ip6mr.c if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
qlen 119 net/kcm/kcmproc.c kcm->sk.sk_receive_queue.qlen,
qlen 121 net/kcm/kcmproc.c kcm->sk.sk_write_queue.qlen,
qlen 149 net/kcm/kcmproc.c psock->sk->sk_receive_queue.qlen,
qlen 151 net/kcm/kcmproc.c psock->sk->sk_write_queue.qlen,
qlen 167 net/kcm/kcmproc.c if (psock->sk->sk_receive_queue.qlen) {
qlen 1814 net/mac80211/cfg.c pinfo->frame_qlen = mpath->frame_queue.qlen;
qlen 61 net/netfilter/nfnetlink_log.c unsigned int qlen; /* number of nlmsgs in skb */
qlen 348 net/netfilter/nfnetlink_log.c if (inst->qlen > 1) {
qlen 362 net/netfilter/nfnetlink_log.c inst->qlen = 0;
qlen 791 net/netfilter/nfnetlink_log.c inst->qlen++;
qlen 797 net/netfilter/nfnetlink_log.c if (inst->qlen >= qthreshold)
qlen 1088 net/netfilter/nfnetlink_log.c inst->peer_portid, inst->qlen,
qlen 324 net/nfc/hci/llc_shdlc.c if (shdlc->send_q.qlen == 0) {
qlen 468 net/nfc/hci/llc_shdlc.c if (shdlc->rcv_q.qlen)
qlen 469 net/nfc/hci/llc_shdlc.c pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);
qlen 524 net/nfc/hci/llc_shdlc.c if (shdlc->send_q.qlen)
qlen 527 net/nfc/hci/llc_shdlc.c shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
qlen 530 net/nfc/hci/llc_shdlc.c shdlc->ack_pending_q.qlen);
qlen 532 net/nfc/hci/llc_shdlc.c while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
qlen 1026 net/sched/act_api.c p->tcfa_qstats.qlen) < 0)
qlen 427 net/sched/em_meta.c dst->value = sk->sk_receive_queue.qlen;
qlen 438 net/sched/em_meta.c dst->value = sk->sk_write_queue.qlen;
qlen 513 net/sched/em_meta.c dst->value = sk->sk_error_queue.qlen;
qlen 781 net/sched/sch_api.c notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
qlen 794 net/sched/sch_api.c sch->q.qlen -= n;
qlen 878 net/sched/sch_api.c __u32 qlen;
qlen 910 net/sched/sch_api.c qlen = qdisc_qlen_sum(q);
qlen 931 net/sched/sch_api.c gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
qlen 455 net/sched/sch_atm.c sch->q.qlen++;
qlen 528 net/sched/sch_atm.c sch->q.qlen--;
qlen 578 net/sched/sch_atm.c sch->q.qlen = 0;
qlen 656 net/sched/sch_atm.c gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
qlen 1511 net/sched/sch_cake.c sch->q.qlen--;
qlen 1658 net/sched/sch_cake.c if (!sch->q.qlen) {
qlen 1695 net/sched/sch_cake.c sch->q.qlen++;
qlen 1733 net/sched/sch_cake.c sch->q.qlen++;
qlen 1863 net/sched/sch_cake.c sch->q.qlen--;
qlen 1898 net/sched/sch_cake.c if (!sch->q.qlen)
qlen 2136 net/sched/sch_cake.c if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
qlen 2141 net/sched/sch_cake.c } else if (!sch->q.qlen) {
qlen 2965 net/sched/sch_cake.c qs.qlen++;
qlen 2973 net/sched/sch_cake.c if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
qlen 378 net/sched/sch_cbq.c sch->q.qlen++;
qlen 533 net/sched/sch_cbq.c if (cl->q->q.qlen > 1) {
qlen 696 net/sched/sch_cbq.c if (cl->q->q.qlen &&
qlen 740 net/sched/sch_cbq.c if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
qlen 757 net/sched/sch_cbq.c if (cl->q->q.qlen)
qlen 764 net/sched/sch_cbq.c if (cl->q->q.qlen)
qlen 819 net/sched/sch_cbq.c sch->q.qlen--;
qlen 853 net/sched/sch_cbq.c if (sch->q.qlen) {
qlen 1056 net/sched/sch_cbq.c sch->q.qlen = 0;
qlen 1378 net/sched/sch_cbq.c __u32 qlen;
qlen 1382 net/sched/sch_cbq.c qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
qlen 1390 net/sched/sch_cbq.c gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
qlen 1553 net/sched/sch_cbq.c if (cl->q->q.qlen)
qlen 99 net/sched/sch_cbs.c sch->q.qlen++;
qlen 119 net/sched/sch_cbs.c if (sch->q.qlen == 0 && q->credits > 0) {
qlen 170 net/sched/sch_cbs.c sch->q.qlen--;
qlen 130 net/sched/sch_choke.c --sch->q.qlen;
qlen 227 net/sched/sch_choke.c q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
qlen 274 net/sched/sch_choke.c if (sch->q.qlen < q->limit) {
qlen 277 net/sched/sch_choke.c ++sch->q.qlen;
qlen 304 net/sched/sch_choke.c --sch->q.qlen;
qlen 324 net/sched/sch_choke.c sch->q.qlen = 0;
qlen 388 net/sched/sch_choke.c unsigned int oqlen = sch->q.qlen, tail = 0;
qlen 403 net/sched/sch_choke.c --sch->q.qlen;
qlen 406 net/sched/sch_choke.c qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
qlen 101 net/sched/sch_codel.c if (q->stats.drop_count && sch->q.qlen) {
qlen 138 net/sched/sch_codel.c unsigned int qlen, dropped = 0;
qlen 175 net/sched/sch_codel.c qlen = sch->q.qlen;
qlen 176 net/sched/sch_codel.c while (sch->q.qlen > sch->limit) {
qlen 183 net/sched/sch_codel.c qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
qlen 261 net/sched/sch_drr.c __u32 qlen = qdisc_qlen_sum(cl->qdisc);
qlen 266 net/sched/sch_drr.c if (qlen)
qlen 272 net/sched/sch_drr.c gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
qlen 357 net/sched/sch_drr.c first = !cl->qdisc->q.qlen;
qlen 373 net/sched/sch_drr.c sch->q.qlen++;
qlen 400 net/sched/sch_drr.c if (cl->qdisc->q.qlen == 0)
qlen 406 net/sched/sch_drr.c sch->q.qlen--;
qlen 441 net/sched/sch_drr.c if (cl->qdisc->q.qlen)
qlen 447 net/sched/sch_drr.c sch->q.qlen = 0;
qlen 278 net/sched/sch_dsmark.c sch->q.qlen++;
qlen 301 net/sched/sch_dsmark.c sch->q.qlen--;
qlen 411 net/sched/sch_dsmark.c sch->q.qlen = 0;
qlen 192 net/sched/sch_etf.c sch->q.qlen++;
qlen 225 net/sched/sch_etf.c sch->q.qlen--;
qlen 250 net/sched/sch_etf.c sch->q.qlen--;
qlen 432 net/sched/sch_etf.c sch->q.qlen--;
qlen 449 net/sched/sch_etf.c sch->q.qlen = 0;
qlen 30 net/sched/sch_fifo.c if (likely(sch->q.qlen < sch->limit))
qlen 41 net/sched/sch_fifo.c if (likely(sch->q.qlen < sch->limit))
qlen 77 net/sched/sch_fq.c int qlen; /* number of packets in flow queue */
qlen 379 net/sched/sch_fq.c flow->qlen--;
qlen 381 net/sched/sch_fq.c sch->q.qlen--;
qlen 426 net/sched/sch_fq.c if (unlikely(sch->q.qlen >= sch->limit))
qlen 430 net/sched/sch_fq.c if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
qlen 435 net/sched/sch_fq.c f->qlen++;
qlen 450 net/sched/sch_fq.c sch->q.qlen++;
qlen 492 net/sched/sch_fq.c if (!sch->q.qlen)
qlen 615 net/sched/sch_fq.c flow->qlen = 0;
qlen 626 net/sched/sch_fq.c sch->q.qlen = 0;
qlen 842 net/sched/sch_fq.c while (sch->q.qlen > sch->limit) {
qlen 181 net/sched/sch_fq_codel.c sch->q.qlen -= i;
qlen 218 net/sched/sch_fq_codel.c if (++sch->q.qlen <= sch->limit && !memory_limited)
qlen 222 net/sched/sch_fq_codel.c prev_qlen = sch->q.qlen;
qlen 233 net/sched/sch_fq_codel.c prev_qlen -= sch->q.qlen;
qlen 268 net/sched/sch_fq_codel.c sch->q.qlen--;
qlen 321 net/sched/sch_fq_codel.c if (q->cstats.drop_count && sch->q.qlen) {
qlen 351 net/sched/sch_fq_codel.c sch->q.qlen = 0;
qlen 425 net/sched/sch_fq_codel.c while (sch->q.qlen > sch->limit ||
qlen 647 net/sched/sch_fq_codel.c qs.qlen++;
qlen 655 net/sched/sch_fq_codel.c if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
qlen 73 net/sched/sch_generic.c q->q.qlen--;
qlen 113 net/sched/sch_generic.c q->q.qlen++;
qlen 142 net/sched/sch_generic.c q->q.qlen++;
qlen 241 net/sched/sch_generic.c q->q.qlen--;
qlen 329 net/sched/sch_generic.c dev->name, ret, q->q.qlen);
qlen 565 net/sched/sch_generic.c .qlen = 0,
qlen 571 net/sched/sch_generic.c .qlen = 0,
qlen 706 net/sched/sch_generic.c q->qlen = 0;
qlen 727 net/sched/sch_generic.c unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
qlen 732 net/sched/sch_generic.c if (!qlen)
qlen 739 net/sched/sch_generic.c err = skb_array_init(q, qlen, GFP_KERNEL);
qlen 927 net/sched/sch_generic.c qdisc->q.qlen = 0;
qlen 385 net/sched/sch_gred.c sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
qlen 749 net/sched/sch_hfsc.c if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
qlen 984 net/sched/sch_hfsc.c if (cl->qdisc->q.qlen != 0) {
qlen 1322 net/sched/sch_hfsc.c __u32 qlen;
qlen 1324 net/sched/sch_hfsc.c qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
qlen 1332 net/sched/sch_hfsc.c gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
qlen 1488 net/sched/sch_hfsc.c sch->q.qlen = 0;
qlen 1547 net/sched/sch_hfsc.c first = !cl->qdisc->q.qlen;
qlen 1573 net/sched/sch_hfsc.c sch->q.qlen++;
qlen 1588 net/sched/sch_hfsc.c if (sch->q.qlen == 0)
qlen 1626 net/sched/sch_hfsc.c if (cl->qdisc->q.qlen != 0) {
qlen 1641 net/sched/sch_hfsc.c sch->q.qlen--;
qlen 362 net/sched/sch_hhf.c sch->q.qlen--;
qlen 402 net/sched/sch_hhf.c if (++sch->q.qlen <= sch->limit)
qlen 445 net/sched/sch_hhf.c sch->q.qlen--;
qlen 513 net/sched/sch_hhf.c unsigned int qlen, prev_backlog;
qlen 563 net/sched/sch_hhf.c qlen = sch->q.qlen;
qlen 565 net/sched/sch_hhf.c while (sch->q.qlen > sch->limit) {
qlen 570 net/sched/sch_hhf.c qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
qlen 557 net/sched/sch_htb.c WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
qlen 589 net/sched/sch_htb.c if (q->direct_queue.qlen < q->direct_qlen) {
qlen 614 net/sched/sch_htb.c sch->q.qlen++;
qlen 844 net/sched/sch_htb.c if (unlikely(cl->leaf.q->q.qlen == 0)) {
qlen 882 net/sched/sch_htb.c if (!cl->leaf.q->q.qlen)
qlen 903 net/sched/sch_htb.c sch->q.qlen--;
qlen 907 net/sched/sch_htb.c if (!sch->q.qlen)
qlen 969 net/sched/sch_htb.c sch->q.qlen = 0;
qlen 1128 net/sched/sch_htb.c __u32 qlen = 0;
qlen 1131 net/sched/sch_htb.c qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
qlen 1141 net/sched/sch_htb.c gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
qlen 133 net/sched/sch_mq.c __u32 qlen = 0;
qlen 135 net/sched/sch_mq.c sch->q.qlen = 0;
qlen 149 net/sched/sch_mq.c qlen = qdisc_qlen_sum(qdisc);
qlen 155 net/sched/sch_mq.c &qdisc->qstats, qlen);
qlen 156 net/sched/sch_mq.c sch->q.qlen += qlen;
qlen 158 net/sched/sch_mq.c sch->q.qlen += qdisc->q.qlen;
qlen 161 net/sched/sch_mq.c sch->qstats.qlen += qdisc->qstats.qlen;
qlen 392 net/sched/sch_mqprio.c sch->q.qlen = 0;
qlen 406 net/sched/sch_mqprio.c __u32 qlen = qdisc_qlen_sum(qdisc);
qlen 413 net/sched/sch_mqprio.c &qdisc->qstats, qlen);
qlen 414 net/sched/sch_mqprio.c sch->q.qlen += qlen;
qlen 416 net/sched/sch_mqprio.c sch->q.qlen += qdisc->q.qlen;
qlen 515 net/sched/sch_mqprio.c __u32 qlen = 0;
qlen 541 net/sched/sch_mqprio.c qlen = qdisc_qlen_sum(qdisc);
qlen 547 net/sched/sch_mqprio.c qlen);
qlen 555 net/sched/sch_mqprio.c gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
qlen 79 net/sched/sch_multiq.c sch->q.qlen++;
qlen 109 net/sched/sch_multiq.c sch->q.qlen--;
qlen 155 net/sched/sch_multiq.c sch->q.qlen = 0;
qlen 405 net/sched/sch_netem.c sch->q.qlen++;
qlen 520 net/sched/sch_netem.c if (unlikely(sch->q.qlen >= sch->limit)) {
qlen 701 net/sched/sch_netem.c sch->q.qlen--;
qlen 204 net/sched/sch_pie.c unsigned int qlen, dropped = 0;
qlen 251 net/sched/sch_pie.c qlen = sch->q.qlen;
qlen 252 net/sched/sch_pie.c while (sch->q.qlen > sch->limit) {
qlen 259 net/sched/sch_pie.c qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
qlen 268 net/sched/sch_pie.c int qlen = sch->qstats.backlog; /* current queue size in bytes */
qlen 274 net/sched/sch_pie.c if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
qlen 313 net/sched/sch_pie.c if (qlen < QUEUE_THRESHOLD) {
qlen 333 net/sched/sch_pie.c u32 qlen = sch->qstats.backlog; /* queue size in bytes */
qlen 345 net/sched/sch_pie.c qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
qlen 352 net/sched/sch_pie.c if (qdelay == 0 && qlen != 0)
qlen 428 net/sched/sch_pie.c q->vars.qlen_old = qlen;
qlen 89 net/sched/sch_prio.c sch->q.qlen++;
qlen 122 net/sched/sch_prio.c sch->q.qlen--;
qlen 139 net/sched/sch_prio.c sch->q.qlen = 0;
qlen 318 net/sched/sch_qfq.c if (cl->qdisc->q.qlen > 0) { /* adding an active class */
qlen 367 net/sched/sch_qfq.c if (cl->qdisc->q.qlen > 0) /* class is active */
qlen 981 net/sched/sch_qfq.c if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
qlen 1117 net/sched/sch_qfq.c } else if (sch->q.qlen == 0) { /* no aggregate to serve */
qlen 1133 net/sched/sch_qfq.c sch->q.qlen--;
qlen 1227 net/sched/sch_qfq.c first = !cl->qdisc->q.qlen;
qlen 1241 net/sched/sch_qfq.c ++sch->q.qlen;
qlen 1459 net/sched/sch_qfq.c if (cl->qdisc->q.qlen > 0)
qlen 1466 net/sched/sch_qfq.c sch->q.qlen = 0;
qlen 100 net/sched/sch_red.c sch->q.qlen++;
qlen 122 net/sched/sch_red.c sch->q.qlen--;
qlen 144 net/sched/sch_red.c sch->q.qlen = 0;
qlen 249 net/sched/sch_red.c if (!q->qdisc->q.qlen)
qlen 40 net/sched/sch_sfb.c u16 qlen; /* length of virtual queue */
qlen 132 net/sched/sch_sfb.c if (b[hash].qlen < 0xFFFF)
qlen 133 net/sched/sch_sfb.c b[hash].qlen++;
qlen 161 net/sched/sch_sfb.c if (b[hash].qlen > 0)
qlen 162 net/sched/sch_sfb.c b[hash].qlen--;
qlen 201 net/sched/sch_sfb.c u32 qlen = 0, prob = 0, totalpm = 0;
qlen 205 net/sched/sch_sfb.c if (qlen < b->qlen)
qlen 206 net/sched/sch_sfb.c qlen = b->qlen;
qlen 214 net/sched/sch_sfb.c return qlen;
qlen 293 net/sched/sch_sfb.c if (unlikely(sch->q.qlen >= q->limit)) {
qlen 333 net/sched/sch_sfb.c if (b->qlen == 0)
qlen 335 net/sched/sch_sfb.c else if (b->qlen >= q->bin_size)
qlen 337 net/sched/sch_sfb.c if (minqlen > b->qlen)
qlen 338 net/sched/sch_sfb.c minqlen = b->qlen;
qlen 366 net/sched/sch_sfb.c if (b->qlen == 0)
qlen 368 net/sched/sch_sfb.c else if (b->qlen >= q->bin_size)
qlen 405 net/sched/sch_sfb.c sch->q.qlen++;
qlen 434 net/sched/sch_sfb.c sch->q.qlen--;
qlen 457 net/sched/sch_sfb.c sch->q.qlen = 0;
qlen 103 net/sched/sch_sfq.c sfq_index qlen; /* number of skbs in skblist */
qlen 207 net/sched/sch_sfq.c int qlen = slot->qlen;
qlen 209 net/sched/sch_sfq.c p = qlen + SFQ_MAX_FLOWS;
qlen 210 net/sched/sch_sfq.c n = q->dep[qlen].next;
qlen 215 net/sched/sch_sfq.c q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
qlen 235 net/sched/sch_sfq.c d = q->slots[x].qlen--;
qlen 248 net/sched/sch_sfq.c d = ++q->slots[x].qlen;
qlen 310 net/sched/sch_sfq.c sch->q.qlen--;
qlen 350 net/sched/sch_sfq.c sfq_index x, qlen;
qlen 424 net/sched/sch_sfq.c if (slot->qlen >= q->maxdepth) {
qlen 446 net/sched/sch_sfq.c if (slot->qlen == 1) { /* The flow is new */
qlen 461 net/sched/sch_sfq.c if (++sch->q.qlen <= q->limit)
qlen 464 net/sched/sch_sfq.c qlen = slot->qlen;
qlen 469 net/sched/sch_sfq.c if (qlen != slot->qlen) {
qlen 502 net/sched/sch_sfq.c sch->q.qlen--;
qlen 506 net/sched/sch_sfq.c if (slot->qlen == 0) {
qlen 549 net/sched/sch_sfq.c if (!slot->qlen)
qlen 551 net/sched/sch_sfq.c while (slot->qlen) {
qlen 581 net/sched/sch_sfq.c if (slot->qlen >= q->maxdepth)
qlen 590 net/sched/sch_sfq.c if (slot->qlen == 1) { /* The flow is new */
qlen 601 net/sched/sch_sfq.c sch->q.qlen -= dropped;
qlen 628 net/sched/sch_sfq.c unsigned int qlen, dropped = 0;
qlen 689 net/sched/sch_sfq.c qlen = sch->q.qlen;
qlen 690 net/sched/sch_sfq.c while (sch->q.qlen > q->limit) {
qlen 697 net/sched/sch_sfq.c qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
qlen 874 net/sched/sch_sfq.c qs.qlen = slot->qlen;
qlen 877 net/sched/sch_sfq.c if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
qlen 82 net/sched/sch_skbprio.c if (sch->q.qlen < sch->limit) {
qlen 94 net/sched/sch_skbprio.c sch->q.qlen++;
qlen 125 net/sched/sch_skbprio.c BUG_ON(sch->q.qlen != 1);
qlen 148 net/sched/sch_skbprio.c sch->q.qlen--;
qlen 157 net/sched/sch_skbprio.c BUG_ON(sch->q.qlen);
qlen 217 net/sched/sch_skbprio.c sch->q.qlen = 0;
qlen 258 net/sched/sch_skbprio.c q->qstats[cl - 1].qlen) < 0)
qlen 436 net/sched/sch_taprio.c sch->q.qlen++;
qlen 599 net/sched/sch_taprio.c sch->q.qlen--;
qlen 629 net/sched/sch_taprio.c sch->q.qlen--;
qlen 172 net/sched/sch_tbf.c sch->q.qlen += nb;
qlen 200 net/sched/sch_tbf.c sch->q.qlen++;
qlen 245 net/sched/sch_tbf.c sch->q.qlen--;
qlen 275 net/sched/sch_tbf.c sch->q.qlen = 0;
qlen 81 net/sched/sch_teql.c if (q->q.qlen < dev->tx_queue_len) {
qlen 110 net/sched/sch_teql.c sch->q.qlen = dat->q.qlen + q->q.qlen;
qlen 127 net/sched/sch_teql.c sch->q.qlen = 0;
qlen 219 net/sunrpc/sched.c queue->qlen++;
qlen 247 net/sunrpc/sched.c queue->qlen--;
qlen 261 net/sunrpc/sched.c queue->qlen = 0;
qlen 1479 net/sunrpc/xprt.c xprt->stat.bklog_u += xprt->backlog.qlen;
qlen 1480 net/sunrpc/xprt.c xprt->stat.sending_u += xprt->sending.qlen;
qlen 1481 net/sunrpc/xprt.c xprt->stat.pending_u += xprt->pending.qlen;
qlen 71 net/unix/diag.c sk->sk_receive_queue.qlen * sizeof(u32));
qlen 106 net/unix/diag.c rql.udiag_rqueue = sk->sk_receive_queue.qlen;
qlen 2838 net/xfrm/xfrm_policy.c if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
qlen 104 sound/core/seq/oss/seq_oss_ioctl.c return put_user(dp->readq->qlen, p) ? -EFAULT : 0;
qlen 48 sound/core/seq/oss/seq_oss_readq.c q->qlen = 0;
qlen 76 sound/core/seq/oss/seq_oss_readq.c if (q->qlen) {
qlen 77 sound/core/seq/oss/seq_oss_readq.c q->qlen = 0;
qlen 146 sound/core/seq/oss/seq_oss_readq.c if (q->qlen >= q->maxlen - 1) {
qlen 153 sound/core/seq/oss/seq_oss_readq.c q->qlen++;
qlen 171 sound/core/seq/oss/seq_oss_readq.c if (q->qlen == 0)
qlen 184 sound/core/seq/oss/seq_oss_readq.c (q->qlen > 0 || q->head == q->tail),
qlen 195 sound/core/seq/oss/seq_oss_readq.c if (q->qlen > 0) {
qlen 197 sound/core/seq/oss/seq_oss_readq.c q->qlen--;
qlen 209 sound/core/seq/oss/seq_oss_readq.c return q->qlen;
qlen 248 sound/core/seq/oss/seq_oss_readq.c q->qlen, q->input_time);
qlen 20 sound/core/seq/oss/seq_oss_readq.h int qlen;
qlen 50 sound/core/seq/seq_midi_event.c int qlen;
qlen 141 sound/core/seq/seq_midi_event.c dev->qlen = 0;
qlen 200 sound/core/seq/seq_midi_event.c dev->qlen = status_event[dev->type].qlen;
qlen 202 sound/core/seq/seq_midi_event.c if (dev->qlen > 0) {
qlen 206 sound/core/seq/seq_midi_event.c dev->qlen--;
qlen 210 sound/core/seq/seq_midi_event.c dev->qlen = status_event[dev->type].qlen - 1;
qlen 214 sound/core/seq/seq_midi_event.c if (dev->qlen == 0) {
qlen 320 sound/core/seq/seq_midi_event.c int qlen;
qlen 331 sound/core/seq/seq_midi_event.c qlen = status_event[type].qlen + 1;
qlen 336 sound/core/seq/seq_midi_event.c qlen = status_event[type].qlen;
qlen 338 sound/core/seq/seq_midi_event.c if (count < qlen)
qlen 340 sound/core/seq/seq_midi_event.c memcpy(buf, xbuf, qlen);
qlen 341 sound/core/seq/seq_midi_event.c return qlen;
qlen 41 tools/include/uapi/linux/pkt_sched.h __u32 qlen;
qlen 97 tools/testing/selftests/net/tcp_fastopen_backup_key.c int qlen = 100;
qlen 134 tools/testing/selftests/net/tcp_fastopen_backup_key.c if (setsockopt(rcv_fds[i], SOL_TCP, TCP_FASTOPEN, &qlen,
qlen 135 tools/testing/selftests/net/tcp_fastopen_backup_key.c sizeof(qlen)))