backlog           898 crypto/algapi.c 	queue->backlog = &queue->list;
backlog           915 crypto/algapi.c 		if (queue->backlog == &queue->list)
backlog           916 crypto/algapi.c 			queue->backlog = &request->list;
backlog           936 crypto/algapi.c 	if (queue->backlog != &queue->list)
backlog           937 crypto/algapi.c 		queue->backlog = queue->backlog->next;
backlog           160 crypto/cryptd.c 	struct crypto_async_request *req, *backlog;
backlog           171 crypto/cryptd.c 	backlog = crypto_get_backlog(&cpu_queue->queue);
backlog           179 crypto/cryptd.c 	if (backlog)
backlog           180 crypto/cryptd.c 		backlog->complete(backlog, -EINPROGRESS);
backlog            68 crypto/crypto_engine.c 	struct crypto_async_request *async_req, *backlog;
backlog           112 crypto/crypto_engine.c 	backlog = crypto_get_backlog(&engine->queue);
backlog           118 crypto/crypto_engine.c 	if (backlog)
backlog           119 crypto/crypto_engine.c 		backlog->complete(backlog, -EINPROGRESS);
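The crypto core entries above share one pattern: crypto/algapi.c keeps a backlog pointer into its request list, and the worker paths in cryptd.c and crypto_engine.c fetch that pointer before dequeuing, then signal the backlogged submitter with -EINPROGRESS once a slot has opened. Below is a minimal userspace model of that bounded-queue-plus-backlog idea, assuming a simple singly linked FIFO; the names (req_queue, request, enqueue, dequeue) are illustrative and are not the kernel crypto API.

/*
 * Userspace sketch of the dequeue-then-notify pattern visible above.
 * Requests past max_qlen stay queued but are tracked through a 'backlog'
 * pointer; every dequeue promotes one backlogged request by invoking its
 * completion callback with a transient "now in progress" status.
 */
#include <errno.h>
#include <stdio.h>

struct request {
	struct request *next;
	void (*complete)(struct request *req, int err);
	int id;
};

struct req_queue {
	struct request *head, *tail;
	struct request *backlog;	/* oldest request beyond max_qlen */
	unsigned int qlen, max_qlen;
};

static int enqueue(struct req_queue *q, struct request *req)
{
	req->next = NULL;
	if (q->tail)
		q->tail->next = req;
	else
		q->head = req;
	q->tail = req;

	if (q->qlen++ < q->max_qlen)
		return 0;			/* accepted, will run soon */
	if (!q->backlog)
		q->backlog = req;		/* first over-limit request */
	return -EBUSY;				/* accepted, but on the backlog */
}

static struct request *dequeue(struct req_queue *q)
{
	struct request *req = q->head;
	struct request *promoted = q->backlog;

	if (!req)
		return NULL;
	q->head = req->next;
	if (!q->head)
		q->tail = NULL;
	q->qlen--;

	if (promoted) {
		/* one slot freed: the oldest backlogged request becomes active */
		q->backlog = q->qlen > q->max_qlen ? promoted->next : NULL;
		promoted->complete(promoted, -EINPROGRESS);
	}
	return req;
}

static void note_progress(struct request *req, int err)
{
	printf("request %d: status %d (left the backlog)\n", req->id, err);
}

int main(void)
{
	struct req_queue q = { .max_qlen = 1 };
	struct request a = { .complete = note_progress, .id = 1 };
	struct request b = { .complete = note_progress, .id = 2 };
	struct request *done;

	enqueue(&q, &a);			/* fits within max_qlen */
	if (enqueue(&q, &b) == -EBUSY)
		printf("request 2 parked on the backlog\n");

	done = dequeue(&q);			/* frees a slot, promotes request 2 */
	printf("processing request %d\n", done->id);
	return 0;
}

The same fetch-backlog, dequeue, then complete(-EINPROGRESS) sequence recurs in most of the drivers/crypto entries listed below.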
backlog          1195 drivers/atm/eni.c 			while ((skb = skb_dequeue(&tx->backlog))) {
backlog          1199 drivers/atm/eni.c 				skb_queue_head(&tx->backlog,skb);
backlog          1334 drivers/atm/eni.c 		skb_queue_head_init(&tx->backlog);
backlog          1409 drivers/atm/eni.c 		txing = skb_peek(&eni_vcc->tx->backlog) || eni_vcc->txing;
backlog          2080 drivers/atm/eni.c 	skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb);
backlog          2173 drivers/atm/eni.c 		    skb_queue_len(&tx->backlog));
backlog            50 drivers/atm/eni.h 	struct sk_buff_head backlog;	/* queue of waiting TX buffers */
backlog           239 drivers/atm/lanai.c 		struct sk_buff_head backlog;
backlog           773 drivers/atm/lanai.c 	while ((skb = skb_dequeue(&lvcc->tx.backlog)) != NULL)
backlog          1149 drivers/atm/lanai.c 	return !skb_queue_empty(&lvcc->tx.backlog);
backlog          1308 drivers/atm/lanai.c 		skb = skb_dequeue(&lvcc->tx.backlog);
backlog          1314 drivers/atm/lanai.c 			skb_queue_head(&lvcc->tx.backlog, skb);
backlog          1340 drivers/atm/lanai.c 		skb_queue_tail(&lvcc->tx.backlog, skb);
backlog          1470 drivers/atm/lanai.c 		skb_queue_head_init(&lvcc->tx.backlog);
backlog           734 drivers/atm/zatm.c 	while ((skb = skb_dequeue(&zatm_vcc->backlog)))
backlog           736 drivers/atm/zatm.c 			skb_queue_head(&zatm_vcc->backlog,skb);
backlog           883 drivers/atm/zatm.c 	if (skb_peek(&zatm_vcc->backlog)) {
backlog           886 drivers/atm/zatm.c 		wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog));
backlog           967 drivers/atm/zatm.c 	skb_queue_head_init(&zatm_vcc->backlog);
backlog          1550 drivers/atm/zatm.c 	skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb);
backlog            55 drivers/atm/zatm.h 	struct sk_buff_head backlog;	/* list of buffers waiting for ring */
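The eni, lanai and zatm entries above all keep a per-VCC sk_buff_head named backlog for TX buffers: the send path appends with skb_queue_tail, the completion/poll path pulls with skb_dequeue, and a buffer that cannot be placed on the hardware ring goes back with skb_queue_head so ordering is preserved. A rough userspace sketch of that requeue-at-head discipline follows; txq, buf and ring_put are invented stand-ins for the driver structures, not driver API.

#include <stdbool.h>
#include <stdio.h>

struct buf {
	struct buf *next;
	unsigned int len;
};

struct txq {
	struct buf *head, *tail;
};

static void txq_tail(struct txq *q, struct buf *b)
{
	b->next = NULL;
	if (q->tail)
		q->tail->next = b;
	else
		q->head = b;
	q->tail = b;
}

static void txq_head(struct txq *q, struct buf *b)
{
	b->next = q->head;
	q->head = b;
	if (!q->tail)
		q->tail = b;
}

static struct buf *txq_get(struct txq *q)
{
	struct buf *b = q->head;

	if (b) {
		q->head = b->next;
		if (!q->head)
			q->tail = NULL;
	}
	return b;
}

/* pretend the hardware ring has room for 'room' more bytes */
static bool ring_put(unsigned int *room, struct buf *b)
{
	if (b->len > *room)
		return false;
	*room -= b->len;
	return true;
}

int main(void)
{
	struct txq backlog = { 0 };
	struct buf a = { .len = 400 }, b = { .len = 900 };
	unsigned int room = 1000;
	struct buf *cur;

	txq_tail(&backlog, &a);
	txq_tail(&backlog, &b);

	while ((cur = txq_get(&backlog))) {
		if (!ring_put(&room, cur)) {
			txq_head(&backlog, cur);	/* ring full: requeue at head */
			break;
		}
		printf("sent %u bytes\n", cur->len);
	}
	return 0;
}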
backlog           936 drivers/crypto/atmel-aes.c 	struct crypto_async_request *areq, *backlog;
backlog           949 drivers/crypto/atmel-aes.c 	backlog = crypto_get_backlog(&dd->queue);
backlog           958 drivers/crypto/atmel-aes.c 	if (backlog)
backlog           959 drivers/crypto/atmel-aes.c 		backlog->complete(backlog, -EINPROGRESS);
backlog          1054 drivers/crypto/atmel-sha.c 	struct crypto_async_request *async_req, *backlog;
backlog          1069 drivers/crypto/atmel-sha.c 	backlog = crypto_get_backlog(&dd->queue);
backlog          1079 drivers/crypto/atmel-sha.c 	if (backlog)
backlog          1080 drivers/crypto/atmel-sha.c 		backlog->complete(backlog, -EINPROGRESS);
backlog           588 drivers/crypto/atmel-tdes.c 	struct crypto_async_request *async_req, *backlog;
backlog           601 drivers/crypto/atmel-tdes.c 	backlog = crypto_get_backlog(&dd->queue);
backlog           610 drivers/crypto/atmel-tdes.c 	if (backlog)
backlog           611 drivers/crypto/atmel-tdes.c 		backlog->complete(backlog, -EINPROGRESS);
backlog           506 drivers/crypto/cavium/nitrox/nitrox_req.h 	struct list_head backlog;
backlog           230 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	INIT_LIST_HEAD(&sr->backlog);
backlog           233 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	list_add_tail(&sr->backlog, &cmdq->backlog_head);
backlog           326 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
backlog           333 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		list_del(&sr->backlog);
backlog            55 drivers/crypto/ccp/ccp-crypto-main.c 	struct list_head *backlog;
backlog            97 drivers/crypto/ccp/ccp-crypto-main.c 	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
backlog           102 drivers/crypto/ccp/ccp-crypto-main.c 	*backlog = NULL;
backlog           121 drivers/crypto/ccp/ccp-crypto-main.c 	if (req_queue.backlog != &req_queue.cmds) {
backlog           123 drivers/crypto/ccp/ccp-crypto-main.c 		if (req_queue.backlog == &crypto_cmd->entry)
backlog           124 drivers/crypto/ccp/ccp-crypto-main.c 			req_queue.backlog = crypto_cmd->entry.next;
backlog           126 drivers/crypto/ccp/ccp-crypto-main.c 		*backlog = container_of(req_queue.backlog,
backlog           128 drivers/crypto/ccp/ccp-crypto-main.c 		req_queue.backlog = req_queue.backlog->next;
backlog           131 drivers/crypto/ccp/ccp-crypto-main.c 		if (req_queue.backlog == &crypto_cmd->entry)
backlog           132 drivers/crypto/ccp/ccp-crypto-main.c 			req_queue.backlog = crypto_cmd->entry.next;
backlog           147 drivers/crypto/ccp/ccp-crypto-main.c 	struct ccp_crypto_cmd *held, *next, *backlog;
backlog           166 drivers/crypto/ccp/ccp-crypto-main.c 	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
backlog           167 drivers/crypto/ccp/ccp-crypto-main.c 	if (backlog) {
backlog           168 drivers/crypto/ccp/ccp-crypto-main.c 		backlog->ret = -EINPROGRESS;
backlog           169 drivers/crypto/ccp/ccp-crypto-main.c 		backlog->req->complete(backlog->req, -EINPROGRESS);
backlog           198 drivers/crypto/ccp/ccp-crypto-main.c 		next = ccp_crypto_cmd_complete(held, &backlog);
backlog           199 drivers/crypto/ccp/ccp-crypto-main.c 		if (backlog) {
backlog           200 drivers/crypto/ccp/ccp-crypto-main.c 			backlog->ret = -EINPROGRESS;
backlog           201 drivers/crypto/ccp/ccp-crypto-main.c 			backlog->req->complete(backlog->req, -EINPROGRESS);
backlog           248 drivers/crypto/ccp/ccp-crypto-main.c 		if (req_queue.backlog == &req_queue.cmds)
backlog           249 drivers/crypto/ccp/ccp-crypto-main.c 			req_queue.backlog = &crypto_cmd->entry;
backlog           415 drivers/crypto/ccp/ccp-crypto-main.c 	req_queue.backlog = &req_queue.cmds;
backlog           560 drivers/crypto/ccp/ccp-dev-v3.c 	while (!list_empty(&ccp->backlog)) {
backlog           562 drivers/crypto/ccp/ccp-dev-v3.c 		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
backlog          1046 drivers/crypto/ccp/ccp-dev-v5.c 	while (!list_empty(&ccp->backlog)) {
backlog          1048 drivers/crypto/ccp/ccp-dev-v5.c 		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
backlog           315 drivers/crypto/ccp/ccp-dev.c 			list_add_tail(&cmd->entry, &ccp->backlog);
backlog           378 drivers/crypto/ccp/ccp-dev.c 	struct ccp_cmd *backlog = NULL;
backlog           403 drivers/crypto/ccp/ccp-dev.c 	if (!list_empty(&ccp->backlog)) {
backlog           404 drivers/crypto/ccp/ccp-dev.c 		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
backlog           406 drivers/crypto/ccp/ccp-dev.c 		list_del(&backlog->entry);
backlog           411 drivers/crypto/ccp/ccp-dev.c 	if (backlog) {
backlog           412 drivers/crypto/ccp/ccp-dev.c 		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
backlog           413 drivers/crypto/ccp/ccp-dev.c 		schedule_work(&backlog->work);
backlog           488 drivers/crypto/ccp/ccp-dev.c 	INIT_LIST_HEAD(&ccp->backlog);
backlog           375 drivers/crypto/ccp/ccp-dev.h 	struct list_head backlog;
backlog            34 drivers/crypto/ccree/cc_request_mgr.c 	struct list_head backlog;
backlog           130 drivers/crypto/ccree/cc_request_mgr.c 	INIT_LIST_HEAD(&req_mgr_h->backlog);
backlog           342 drivers/crypto/ccree/cc_request_mgr.c 	list_add_tail(&bli->list, &mgr->backlog);
backlog           361 drivers/crypto/ccree/cc_request_mgr.c 		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
backlog          2039 drivers/crypto/hifn_795x.c 	struct crypto_async_request *async_req, *backlog;
backlog          2046 drivers/crypto/hifn_795x.c 		backlog = crypto_get_backlog(&dev->queue);
backlog          2053 drivers/crypto/hifn_795x.c 		if (backlog)
backlog          2054 drivers/crypto/hifn_795x.c 			backlog->complete(backlog, -EINPROGRESS);
backlog           495 drivers/crypto/hisilicon/sec/sec_algs.c 	} else if (!list_empty(&ctx->backlog)) {
backlog           497 drivers/crypto/hisilicon/sec/sec_algs.c 		backlog_req = list_first_entry(&ctx->backlog,
backlog           811 drivers/crypto/hisilicon/sec/sec_algs.c 	    !list_empty(&ctx->backlog)) {
backlog           814 drivers/crypto/hisilicon/sec/sec_algs.c 			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
backlog           875 drivers/crypto/hisilicon/sec/sec_algs.c 	INIT_LIST_HEAD(&ctx->backlog);
backlog           250 drivers/crypto/hisilicon/sec/sec_drv.h 	struct list_head backlog;
backlog           497 drivers/crypto/img-hash.c 	struct crypto_async_request *async_req, *backlog;
backlog           512 drivers/crypto/img-hash.c 	backlog = crypto_get_backlog(&hdev->queue);
backlog           522 drivers/crypto/img-hash.c 	if (backlog)
backlog           523 drivers/crypto/img-hash.c 		backlog->complete(backlog, -EINPROGRESS);
backlog           771 drivers/crypto/inside-secure/safexcel.c 	struct crypto_async_request *req, *backlog;
backlog           779 drivers/crypto/inside-secure/safexcel.c 	backlog = priv->ring[ring].backlog;
backlog           785 drivers/crypto/inside-secure/safexcel.c 		backlog = crypto_get_backlog(&priv->ring[ring].queue);
backlog           791 drivers/crypto/inside-secure/safexcel.c 			priv->ring[ring].backlog = NULL;
backlog           801 drivers/crypto/inside-secure/safexcel.c 		if (backlog)
backlog           802 drivers/crypto/inside-secure/safexcel.c 			backlog->complete(backlog, -EINPROGRESS);
backlog           821 drivers/crypto/inside-secure/safexcel.c 	priv->ring[ring].backlog = backlog;
backlog           642 drivers/crypto/inside-secure/safexcel.h 	struct crypto_async_request *backlog;
backlog            39 drivers/crypto/marvell/cesa.c 			   struct crypto_async_request **backlog)
backlog            43 drivers/crypto/marvell/cesa.c 	*backlog = crypto_get_backlog(&engine->queue);
backlog            54 drivers/crypto/marvell/cesa.c 	struct crypto_async_request *req = NULL, *backlog = NULL;
backlog            60 drivers/crypto/marvell/cesa.c 		req = mv_cesa_dequeue_req_locked(engine, &backlog);
backlog            68 drivers/crypto/marvell/cesa.c 	if (backlog)
backlog            69 drivers/crypto/marvell/cesa.c 		backlog->complete(backlog, -EINPROGRESS);
backlog           726 drivers/crypto/marvell/cesa.h 			   struct crypto_async_request **backlog);
backlog           136 drivers/crypto/marvell/tdma.c 			struct crypto_async_request *backlog = NULL;
backlog           149 drivers/crypto/marvell/tdma.c 								 &backlog);
backlog           170 drivers/crypto/marvell/tdma.c 			if (backlog)
backlog           171 drivers/crypto/marvell/tdma.c 				backlog->complete(backlog, -EINPROGRESS);
backlog           513 drivers/crypto/mediatek/mtk-aes.c 	struct crypto_async_request *areq, *backlog;
backlog           525 drivers/crypto/mediatek/mtk-aes.c 	backlog = crypto_get_backlog(&aes->queue);
backlog           534 drivers/crypto/mediatek/mtk-aes.c 	if (backlog)
backlog           535 drivers/crypto/mediatek/mtk-aes.c 		backlog->complete(backlog, -EINPROGRESS);
backlog           654 drivers/crypto/mediatek/mtk-sha.c 	struct crypto_async_request *async_req, *backlog;
backlog           668 drivers/crypto/mediatek/mtk-sha.c 	backlog = crypto_get_backlog(&sha->queue);
backlog           677 drivers/crypto/mediatek/mtk-sha.c 	if (backlog)
backlog           678 drivers/crypto/mediatek/mtk-sha.c 		backlog->complete(backlog, -EINPROGRESS);
backlog           394 drivers/crypto/mxs-dcp.c 	struct crypto_async_request *backlog;
backlog           403 drivers/crypto/mxs-dcp.c 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
backlog           407 drivers/crypto/mxs-dcp.c 		if (!backlog && !arq) {
backlog           414 drivers/crypto/mxs-dcp.c 		if (backlog)
backlog           415 drivers/crypto/mxs-dcp.c 			backlog->complete(backlog, -EINPROGRESS);
backlog           693 drivers/crypto/mxs-dcp.c 	struct crypto_async_request *backlog;
backlog           701 drivers/crypto/mxs-dcp.c 		backlog = crypto_get_backlog(&sdcp->queue[chan]);
backlog           705 drivers/crypto/mxs-dcp.c 		if (!backlog && !arq) {
backlog           712 drivers/crypto/mxs-dcp.c 		if (backlog)
backlog           713 drivers/crypto/mxs-dcp.c 			backlog->complete(backlog, -EINPROGRESS);
backlog           250 drivers/crypto/omap-aes-gcm.c 	struct aead_request *backlog;
backlog           263 drivers/crypto/omap-aes-gcm.c 	backlog = aead_get_backlog(&dd->aead_queue);
backlog           272 drivers/crypto/omap-aes-gcm.c 	if (backlog)
backlog           273 drivers/crypto/omap-aes-gcm.c 		backlog->base.complete(&backlog->base, -EINPROGRESS);
backlog          1117 drivers/crypto/omap-sham.c 	struct crypto_async_request *async_req, *backlog;
backlog          1130 drivers/crypto/omap-sham.c 	backlog = crypto_get_backlog(&dd->queue);
backlog          1139 drivers/crypto/omap-sham.c 	if (backlog)
backlog          1140 drivers/crypto/omap-sham.c 		backlog->complete(backlog, -EINPROGRESS);
backlog            75 drivers/crypto/qce/core.c 	struct crypto_async_request *async_req, *backlog;
backlog            90 drivers/crypto/qce/core.c 	backlog = crypto_get_backlog(&qce->queue);
backlog           100 drivers/crypto/qce/core.c 	if (backlog) {
backlog           102 drivers/crypto/qce/core.c 		backlog->complete(backlog, -EINPROGRESS);
backlog           206 drivers/crypto/rockchip/rk3288_crypto.c 	struct crypto_async_request *async_req, *backlog;
backlog           212 drivers/crypto/rockchip/rk3288_crypto.c 	backlog   = crypto_get_backlog(&dev->queue);
backlog           222 drivers/crypto/rockchip/rk3288_crypto.c 	if (backlog) {
backlog           223 drivers/crypto/rockchip/rk3288_crypto.c 		backlog->complete(backlog, -EINPROGRESS);
backlog           224 drivers/crypto/rockchip/rk3288_crypto.c 		backlog = NULL;
backlog          1375 drivers/crypto/s5p-sss.c 	struct crypto_async_request *async_req, *backlog;
backlog          1390 drivers/crypto/s5p-sss.c 	backlog = crypto_get_backlog(&dd->hash_queue);
backlog          1400 drivers/crypto/s5p-sss.c 	if (backlog)
backlog          1401 drivers/crypto/s5p-sss.c 		backlog->complete(backlog, -EINPROGRESS);
backlog          2006 drivers/crypto/s5p-sss.c 	struct crypto_async_request *async_req, *backlog;
backlog          2011 drivers/crypto/s5p-sss.c 	backlog   = crypto_get_backlog(&dev->queue);
backlog          2021 drivers/crypto/s5p-sss.c 	if (backlog)
backlog          2022 drivers/crypto/s5p-sss.c 		backlog->complete(backlog, -EINPROGRESS);
backlog          1052 drivers/crypto/sahara.c 	struct crypto_async_request *backlog;
backlog          1059 drivers/crypto/sahara.c 		backlog = crypto_get_backlog(&dev->queue);
backlog          1063 drivers/crypto/sahara.c 		if (backlog)
backlog          1064 drivers/crypto/sahara.c 			backlog->complete(backlog, -EINPROGRESS);
backlog          2430 drivers/infiniband/core/cma.c static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
backlog          2448 drivers/infiniband/core/cma.c 	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
backlog          2498 drivers/infiniband/core/cma.c 	ret = rdma_listen(id, id_priv->backlog);
backlog          3536 drivers/infiniband/core/cma.c int rdma_listen(struct rdma_cm_id *id, int backlog)
backlog          3558 drivers/infiniband/core/cma.c 	id_priv->backlog = backlog;
backlog          3565 drivers/infiniband/core/cma.c 			ret = cma_iw_listen(id_priv, backlog);
backlog          3577 drivers/infiniband/core/cma.c 	id_priv->backlog = 0;
backlog            72 drivers/infiniband/core/cma_priv.h 	int			backlog;
backlog           562 drivers/infiniband/core/iwcm.c int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
backlog           570 drivers/infiniband/core/iwcm.c 	if (!backlog)
backlog           571 drivers/infiniband/core/iwcm.c 		backlog = default_backlog;
backlog           573 drivers/infiniband/core/iwcm.c 	ret = alloc_work_entries(cm_id_priv, backlog);
backlog           585 drivers/infiniband/core/iwcm.c 								  backlog);
backlog            90 drivers/infiniband/core/ucma.c 	int			backlog;
backlog           364 drivers/infiniband/core/ucma.c 		if (!ctx->backlog) {
backlog           369 drivers/infiniband/core/ucma.c 		ctx->backlog--;
backlog           435 drivers/infiniband/core/ucma.c 		uevent->ctx->backlog++;
backlog          1110 drivers/infiniband/core/ucma.c 	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
backlog          1111 drivers/infiniband/core/ucma.c 		       cmd.backlog : max_backlog;
backlog          1113 drivers/infiniband/core/ucma.c 	ret = rdma_listen(ctx->cm_id, ctx->backlog);
backlog          1972 drivers/infiniband/hw/cxgb3/iwch_cm.c int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
backlog          1996 drivers/infiniband/hw/cxgb3/iwch_cm.c 	ep->backlog = backlog;
backlog           172 drivers/infiniband/hw/cxgb3/iwch_cm.h 	int backlog;
backlog           218 drivers/infiniband/hw/cxgb3/iwch_cm.h int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
backlog          3536 drivers/infiniband/hw/cxgb4/cm.c int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
backlog          3555 drivers/infiniband/hw/cxgb4/cm.c 	ep->backlog = backlog;
backlog           635 drivers/infiniband/hw/cxgb4/device.c 			      ep->com.flags, ep->stid, ep->backlog,
backlog           648 drivers/infiniband/hw/cxgb4/device.c 			      ep->com.flags, ep->stid, ep->backlog,
backlog           871 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 	int backlog;
backlog           976 drivers/infiniband/hw/cxgb4/iw_cxgb4.h int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
backlog           247 drivers/infiniband/hw/cxgb4/restrack.c 		if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
backlog          2599 drivers/infiniband/hw/i40iw/i40iw_cm.c 		    cm_node->listener->backlog) {
backlog          2899 drivers/infiniband/hw/i40iw/i40iw_cm.c 	listener->backlog = cm_info->backlog;
backlog          3958 drivers/infiniband/hw/i40iw/i40iw_cm.c int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
backlog          3996 drivers/infiniband/hw/i40iw/i40iw_cm.c 	cm_info.backlog = backlog;
backlog           297 drivers/infiniband/hw/i40iw/i40iw_cm.h 	int backlog;
backlog           373 drivers/infiniband/hw/i40iw/i40iw_cm.h 	int backlog;
backlog           540 drivers/infiniband/hw/qedr/qedr.h 	int		backlog;
backlog           646 drivers/infiniband/hw/qedr/qedr_iw_cm.c int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
backlog           670 drivers/infiniband/hw/qedr/qedr_iw_cm.c 	listener->backlog = backlog;
backlog           674 drivers/infiniband/hw/qedr/qedr_iw_cm.c 	iparams.max_backlog = backlog;
backlog            37 drivers/infiniband/hw/qedr/qedr_iw_cm.h int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
backlog          1792 drivers/infiniband/sw/siw/siw_cm.c int siw_create_listen(struct iw_cm_id *id, int backlog)
backlog          1847 drivers/infiniband/sw/siw/siw_cm.c 	rv = siw_cm_alloc_work(cep, backlog);
backlog          1851 drivers/infiniband/sw/siw/siw_cm.c 			rv, backlog);
backlog          1854 drivers/infiniband/sw/siw/siw_cm.c 	rv = s->ops->listen(s, backlog);
backlog           117 drivers/infiniband/sw/siw/siw_cm.h int siw_create_listen(struct iw_cm_id *id, int backlog);
backlog          2470 drivers/md/md-bitmap.c 	unsigned long backlog;
backlog          2472 drivers/md/md-bitmap.c 	int rv = kstrtoul(buf, 10, &backlog);
backlog          2475 drivers/md/md-bitmap.c 	if (backlog > COUNTER_MAX)
backlog          2477 drivers/md/md-bitmap.c 	mddev->bitmap_info.max_write_behind = backlog;
backlog          2478 drivers/md/md-bitmap.c 	if (!backlog && mddev->wb_info_pool) {
backlog          2482 drivers/md/md-bitmap.c 	} else if (backlog && !mddev->wb_info_pool) {
backlog          2489 drivers/md/md-bitmap.c 	if (old_mwb != backlog)
backlog          2495 drivers/md/md-bitmap.c __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
backlog           385 drivers/misc/mic/scif/scif_api.c int scif_listen(scif_epd_t epd, int backlog)
backlog           411 drivers/misc/mic/scif/scif_api.c 	ep->backlog = backlog;
backlog           136 drivers/misc/mic/scif/scif_epd.c 	if (ep->backlog <= ep->conreqcnt) {
backlog           101 drivers/misc/mic/scif/scif_epd.h 	int backlog;
backlog           192 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		list_add_tail(&buf->list, &conn->qp.sq.backlog);
backlog           305 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
backlog           307 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 			nextbuf = list_first_entry(&conn->qp.sq.backlog,
backlog           651 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
backlog           866 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	INIT_LIST_HEAD(&conn->qp.sq.backlog);
backlog            75 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h 			struct list_head backlog;
backlog          1140 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		xstats->backlog[i] =
backlog          4192 drivers/net/ethernet/mellanox/mlxsw/spectrum.c 		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
backlog           223 drivers/net/ethernet/mellanox/mlxsw/spectrum.h 	u64 backlog[TC_MAX_QUEUE];
backlog            59 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 		u64 backlog;
backlog           201 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	return xstats->backlog[tclass_num] +
backlog           202 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	       xstats->backlog[tclass_num + 8];
backlog           291 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	stats_base->backlog = 0;
backlog           301 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 		root_qdisc->stats_base.backlog -=
backlog           302 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 					mlxsw_sp_qdisc->stats_base.backlog;
backlog           363 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	u64 backlog;
backlog           365 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
backlog           366 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 				       mlxsw_sp_qdisc->stats_base.backlog);
backlog           367 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	p->qstats->backlog -= backlog;
backlog           368 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	mlxsw_sp_qdisc->stats_base.backlog = 0;
backlog           404 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	u64 tx_bytes, tx_packets, overlimits, drops, backlog;
backlog           423 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num);
backlog           428 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	stats_ptr->qstats->backlog +=
backlog           430 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 						     backlog) -
backlog           432 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 						     stats_base->backlog);
backlog           434 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	stats_base->backlog = backlog;
backlog           525 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	int tclass, i, band, backlog;
backlog           547 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 			backlog = child_qdisc->stats_base.backlog;
backlog           550 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 			child_qdisc->stats_base.backlog = backlog;
backlog           568 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	u64 backlog;
backlog           570 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
backlog           571 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 				       mlxsw_sp_qdisc->stats_base.backlog);
backlog           572 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	p->qstats->backlog -= backlog;
backlog           580 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
backlog           596 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 		backlog += mlxsw_sp_xstats_backlog(xstats, i);
backlog           602 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	stats_ptr->qstats->backlog +=
backlog           604 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 						     backlog) -
backlog           606 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 						     stats_base->backlog);
backlog           607 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	stats_base->backlog = backlog;
backlog           636 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c 	mlxsw_sp_qdisc->stats_base.backlog = 0;
backlog           467 drivers/net/ethernet/netronome/nfp/abm/qdisc.c 	qstats->backlog += new->backlog_bytes - old->backlog_bytes;
backlog            98 drivers/net/ipvlan/ipvlan.h 	struct sk_buff_head	backlog;
backlog           240 drivers/net/ipvlan/ipvlan_core.c 	spin_lock_bh(&port->backlog.lock);
backlog           241 drivers/net/ipvlan/ipvlan_core.c 	skb_queue_splice_tail_init(&port->backlog, &list);
backlog           242 drivers/net/ipvlan/ipvlan_core.c 	spin_unlock_bh(&port->backlog.lock);
backlog           550 drivers/net/ipvlan/ipvlan_core.c 	spin_lock(&port->backlog.lock);
backlog           551 drivers/net/ipvlan/ipvlan_core.c 	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
backlog           554 drivers/net/ipvlan/ipvlan_core.c 		__skb_queue_tail(&port->backlog, skb);
backlog           555 drivers/net/ipvlan/ipvlan_core.c 		spin_unlock(&port->backlog.lock);
backlog           558 drivers/net/ipvlan/ipvlan_core.c 		spin_unlock(&port->backlog.lock);
backlog            75 drivers/net/ipvlan/ipvlan_main.c 	skb_queue_head_init(&port->backlog);
backlog           100 drivers/net/ipvlan/ipvlan_main.c 	while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
backlog           255 drivers/net/wireless/intersil/p54/lmac.h 	u8 backlog;
backlog           449 drivers/net/wireless/intersil/p54/lmac.h 	u8 backlog;
backlog           927 drivers/net/wireless/intersil/p54/txrx.c 	txhdr->backlog = priv->tx_stats[queue].len - 1;
backlog           858 drivers/target/iscsi/iscsi_target_login.c 	int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len;
backlog           935 drivers/target/iscsi/iscsi_target_login.c 	ret = kernel_listen(sock, backlog);
backlog           688 drivers/xen/pvcalls-back.c 	ret = inet_listen(map->sock, req->u.listen.backlog);
backlog           713 drivers/xen/pvcalls-front.c int pvcalls_front_listen(struct socket *sock, int backlog)
backlog           741 drivers/xen/pvcalls-front.c 	req->u.listen.backlog = backlog;
backlog            12 drivers/xen/pvcalls-front.h int pvcalls_front_listen(struct socket *sock, int backlog);
backlog            77 include/crypto/algapi.h 	struct list_head *backlog;
backlog           352 include/crypto/algapi.h 	return queue->backlog == &queue->list ? NULL :
backlog           353 include/crypto/algapi.h 	       container_of(queue->backlog, struct crypto_async_request, list);
backlog          1715 include/linux/lsm_hooks.h 	int (*socket_listen)(struct socket *sock, int backlog);
backlog           299 include/linux/net.h int kernel_listen(struct socket *sock, int backlog);
backlog          3020 include/linux/netdevice.h 	struct napi_struct	backlog;
backlog           233 include/linux/scif.h int scif_listen(scif_epd_t epd, int backlog);
backlog          1287 include/linux/security.h int security_socket_listen(struct socket *sock, int backlog);
backlog          1376 include/linux/security.h static inline int security_socket_listen(struct socket *sock, int backlog)
backlog           401 include/linux/socket.h extern int __sys_listen(int fd, int backlog);
backlog           204 include/linux/sunrpc/xprt.h 	struct rpc_wait_queue	backlog;	/* waiting for slot */
backlog           454 include/linux/tcp.h static inline void fastopen_queue_tune(struct sock *sk, int backlog)
backlog           459 include/linux/tcp.h 	queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn);
backlog           107 include/net/codel_impl.h 			      u32 *backlog,
backlog           125 include/net/codel_impl.h 	    *backlog <= params->mtu) {
backlog           143 include/net/codel_impl.h 				     u32 *backlog,
backlog           162 include/net/codel_impl.h 				 skb_len_func, skb_time_func, backlog, now);
backlog           198 include/net/codel_impl.h 						       backlog, now)) {
backlog           223 include/net/codel_impl.h 						 skb_time_func, backlog, now);
backlog            34 include/net/fq.h 	u32 backlog;
backlog            77 include/net/fq.h 	u32 backlog;
backlog            22 include/net/fq_impl.h 	flow->backlog -= skb->len;
backlog            23 include/net/fq_impl.h 	fq->backlog--;
backlog            31 include/net/fq_impl.h 	if (flow->backlog == 0) {
backlog            37 include/net/fq_impl.h 			if (i->backlog < flow->backlog)
backlog           150 include/net/fq_impl.h 		if (i->backlog > flow->backlog)
backlog           170 include/net/fq_impl.h 	flow->backlog += skb->len;
backlog           174 include/net/fq_impl.h 	fq->backlog++;
backlog           186 include/net/fq_impl.h 	while (fq->backlog > fq->limit || oom) {
backlog           264 include/net/fq_impl.h 	WARN_ON_ONCE(flow->backlog);
backlog            35 include/net/inet_common.h int inet_listen(struct socket *sock, int backlog);
backlog           302 include/net/inet_connection_sock.h int inet_csk_listen_start(struct sock *sk, int backlog);
backlog           761 include/net/pkt_cls.h 	u32 *backlog;
backlog           292 include/net/red.h 						       unsigned int backlog)
backlog           303 include/net/red.h 	return v->qavg + (backlog - (v->qavg >> p->Wlog));
backlog           308 include/net/red.h 					  unsigned int backlog)
backlog           311 include/net/red.h 		return red_calc_qavg_no_idle_time(p, v, backlog);
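The two include/net/red.h lines above implement RED's averaging step in fixed point: assuming qavg is stored scaled by 2^Wlog, qavg += backlog - (qavg >> Wlog) is the same as avg += 2^-Wlog * (backlog - avg). A small standalone check of that reading (a sketch, not the kernel code):

#include <stdio.h>

/* one EWMA step on the scaled average, mirroring red_calc_qavg_no_idle_time */
static unsigned long qavg_update(unsigned long qavg, unsigned int backlog,
				 unsigned int wlog)
{
	return qavg + (backlog - (qavg >> wlog));
}

int main(void)
{
	unsigned long qavg = 0;
	const unsigned int wlog = 9;	/* weight = 1/512 */

	for (int i = 0; i < 20000; i++)
		qavg = qavg_update(qavg, 3000, wlog);

	/* the unscaled average converges toward the steady backlog of 3000 */
	printf("avg ~= %lu bytes\n", qavg >> wlog);
	return 0;
}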
backlog           845 include/net/sch_generic.h 	sch->qstats.backlog -= qdisc_pkt_len(skb);
backlog           851 include/net/sch_generic.h 	this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
backlog           857 include/net/sch_generic.h 	sch->qstats.backlog += qdisc_pkt_len(skb);
backlog           863 include/net/sch_generic.h 	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
backlog           919 include/net/sch_generic.h 					     __u32 *backlog)
backlog           926 include/net/sch_generic.h 	*backlog = qstats.backlog;
backlog           931 include/net/sch_generic.h 	__u32 qlen, backlog;
backlog           933 include/net/sch_generic.h 	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
backlog           934 include/net/sch_generic.h 	qdisc_tree_reduce_backlog(sch, qlen, backlog);
backlog           939 include/net/sch_generic.h 	__u32 qlen, backlog;
backlog           941 include/net/sch_generic.h 	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
backlog           943 include/net/sch_generic.h 	qdisc_tree_reduce_backlog(sch, qlen, backlog);
backlog          1102 include/net/sch_generic.h 		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
backlog          1104 include/net/sch_generic.h 		sch->qstats.backlog += pkt_len;
backlog          1149 include/net/sch_generic.h 	sch->qstats.backlog = 0;
backlog            94 include/net/sctp/sctp.h int sctp_inet_listen(struct socket *sock, int backlog);
backlog          2537 include/rdma/ib_verbs.h 	int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
backlog           174 include/rdma/iw_cm.h int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
backlog           274 include/rdma/rdma_cm.h int rdma_listen(struct rdma_cm_id *id, int backlog);
backlog           301 include/trace/events/sunrpc.h 		ktime_t backlog,
backlog           306 include/trace/events/sunrpc.h 	TP_ARGS(task, backlog, rtt, execute),
backlog           315 include/trace/events/sunrpc.h 		__field(unsigned long, backlog)
backlog           327 include/trace/events/sunrpc.h 		__entry->backlog = ktime_to_us(backlog);
backlog           335 include/trace/events/sunrpc.h 		__entry->backlog, __entry->rtt, __entry->execute)
backlog           461 include/uapi/linux/audit.h 	__u32		backlog;	/* messages waiting in queue */
backlog            64 include/uapi/linux/gen_stats.h 	__u32	backlog;
backlog            43 include/uapi/linux/pkt_sched.h 	__u32	backlog;
backlog           332 include/uapi/linux/pkt_sched.h 	__u32		backlog;
backlog           217 include/uapi/rdma/rdma_user_cm.h 	__u32 backlog;
backlog            66 include/xen/interface/io/pvcalls.h             uint32_t backlog;
backlog          1203 kernel/audit.c 		s.backlog		= skb_queue_len(&audit_queue);
backlog           282 net/atm/svc.c  static int svc_listen(struct socket *sock, int backlog)
backlog           315 net/atm/svc.c  	sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
backlog           778 net/ax25/af_ax25.c static int ax25_listen(struct socket *sock, int backlog)
backlog           785 net/ax25/af_ax25.c 		sk->sk_max_ack_backlog = backlog;
backlog           253 net/bluetooth/l2cap_sock.c static int l2cap_sock_listen(struct socket *sock, int backlog)
backlog           259 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p backlog %d", sk, backlog);
backlog           287 net/bluetooth/l2cap_sock.c 	sk->sk_max_ack_backlog = backlog;
backlog           427 net/bluetooth/rfcomm/sock.c static int rfcomm_sock_listen(struct socket *sock, int backlog)
backlog           432 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p backlog %d", sk, backlog);
backlog           467 net/bluetooth/rfcomm/sock.c 	sk->sk_max_ack_backlog = backlog;
backlog           588 net/bluetooth/sco.c static int sco_sock_listen(struct socket *sock, int backlog)
backlog           594 net/bluetooth/sco.c 	BT_DBG("sk %p backlog %d", sk, backlog);
backlog           615 net/bluetooth/sco.c 	sk->sk_max_ack_backlog = backlog;
backlog          4087 net/core/dev.c 	____napi_schedule(sd, &sd->backlog);
backlog          4186 net/core/dev.c 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
backlog          4188 net/core/dev.c 				____napi_schedule(sd, &sd->backlog);
backlog          5851 net/core/dev.c 	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
backlog          10209 net/core/dev.c 		init_gro_hash(&sd->backlog);
backlog          10210 net/core/dev.c 		sd->backlog.poll = process_backlog;
backlog          10211 net/core/dev.c 		sd->backlog.weight = weight_p;
backlog           291 net/core/gen_stats.c 		qstats->backlog += qcpu->backlog;
backlog           307 net/core/gen_stats.c 		qstats->backlog = q->backlog;
backlog           343 net/core/gen_stats.c 		d->tc_stats.backlog = qstats.backlog;
backlog          2685 net/core/sock.c int sock_no_listen(struct socket *sock, int backlog)
backlog           315 net/dccp/dccp.h int inet_dccp_listen(struct socket *sock, int backlog);
backlog           241 net/dccp/proto.c static inline int dccp_listen_start(struct sock *sk, int backlog)
backlog           249 net/dccp/proto.c 	return inet_csk_listen_start(sk, backlog);
backlog           931 net/dccp/proto.c int inet_dccp_listen(struct socket *sock, int backlog)
backlog           947 net/dccp/proto.c 	sk->sk_max_ack_backlog = backlog;
backlog           956 net/dccp/proto.c 		err = dccp_listen_start(sk, backlog);
backlog          1265 net/decnet/af_decnet.c static int dn_listen(struct socket *sock, int backlog)
backlog          1278 net/decnet/af_decnet.c 	sk->sk_max_ack_backlog = backlog;
backlog           195 net/ipv4/af_inet.c int inet_listen(struct socket *sock, int backlog)
backlog           211 net/ipv4/af_inet.c 	sk->sk_max_ack_backlog = backlog;
backlog           226 net/ipv4/af_inet.c 			fastopen_queue_tune(sk, backlog);
backlog           230 net/ipv4/af_inet.c 		err = inet_csk_listen_start(sk, backlog);
backlog           892 net/ipv4/inet_connection_sock.c int inet_csk_listen_start(struct sock *sk, int backlog)
backlog           934 net/iucv/af_iucv.c static int iucv_sock_listen(struct socket *sock, int backlog)
backlog           948 net/iucv/af_iucv.c 	sk->sk_max_ack_backlog = backlog;
backlog           523 net/llc/af_llc.c static int llc_ui_listen(struct socket *sock, int backlog)
backlog           538 net/llc/af_llc.c 	if (!(unsigned int)backlog)	/* BSDism */
backlog           539 net/llc/af_llc.c 		backlog = 1;
backlog           540 net/llc/af_llc.c 	sk->sk_max_ack_backlog = backlog;
backlog          3921 net/mac80211/cfg.c 		txqstats->backlog_packets = local->fq.backlog;
backlog            98 net/mac80211/debugfs.c 			fq->backlog,
backlog          1367 net/mac80211/tx.c 			     &flow->backlog,
backlog          3348 net/mac80211/tx.c 		flow->backlog += head->len - orig_len;
backlog           376 net/netrom/af_netrom.c static int nr_listen(struct socket *sock, int backlog)
backlog           383 net/netrom/af_netrom.c 		sk->sk_max_ack_backlog = backlog;
backlog           193 net/nfc/llcp_sock.c static int llcp_sock_listen(struct socket *sock, int backlog)
backlog           198 net/nfc/llcp_sock.c 	pr_debug("sk %p backlog %d\n", sk, backlog);
backlog           208 net/nfc/llcp_sock.c 	sk->sk_max_ack_backlog = backlog;
backlog           394 net/phonet/socket.c static int pn_socket_listen(struct socket *sock, int backlog)
backlog           412 net/phonet/socket.c 	sk->sk_max_ack_backlog = backlog;
backlog           463 net/rose/af_rose.c static int rose_listen(struct socket *sock, int backlog)
backlog           474 net/rose/af_rose.c 		sk->sk_max_ack_backlog = backlog;
backlog           209 net/rxrpc/af_rxrpc.c static int rxrpc_listen(struct socket *sock, int backlog)
backlog           216 net/rxrpc/af_rxrpc.c 	_enter("%p,%d", rx, backlog);
backlog           229 net/rxrpc/af_rxrpc.c 		if (backlog == INT_MAX)
backlog           230 net/rxrpc/af_rxrpc.c 			backlog = max;
backlog           231 net/rxrpc/af_rxrpc.c 		else if (backlog < 0 || backlog > max)
backlog           234 net/rxrpc/af_rxrpc.c 		sk->sk_max_ack_backlog = backlog;
backlog           242 net/rxrpc/af_rxrpc.c 		if (backlog == 0) {
backlog           136 net/rxrpc/ar-internal.h 	struct rxrpc_backlog	*backlog;	/* Preallocation for services */
backlog           163 net/rxrpc/call_accept.c 	struct rxrpc_backlog *b = rx->backlog;
backlog           169 net/rxrpc/call_accept.c 		rx->backlog = b;
backlog           187 net/rxrpc/call_accept.c 	struct rxrpc_backlog *b = rx->backlog;
backlog           193 net/rxrpc/call_accept.c 	rx->backlog = NULL;
backlog           270 net/rxrpc/call_accept.c 	struct rxrpc_backlog *b = rx->backlog;
backlog           661 net/rxrpc/call_accept.c 	struct rxrpc_backlog *b = rx->backlog;
backlog           795 net/sched/sch_api.c 		sch->qstats.backlog -= len;
backlog          1500 net/sched/sch_cake.c 	sch->qstats.backlog -= len;
backlog          1707 net/sched/sch_cake.c 		sch->qstats.backlog += slen;
backlog          1742 net/sched/sch_cake.c 		sch->qstats.backlog += len;
backlog          1861 net/sched/sch_cake.c 		sch->qstats.backlog      -= len;
backlog          2970 net/sched/sch_cake.c 		qs.backlog = b->backlogs[idx % CAKE_QUEUES];
backlog          1382 net/sched/sch_cbq.c 	qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
backlog            98 net/sched/sch_cbs.c 	sch->qstats.backlog += len;
backlog           325 net/sched/sch_choke.c 	sch->qstats.backlog = 0;
backlog            75 net/sched/sch_codel.c 		sch->qstats.backlog -= qdisc_pkt_len(skb);
backlog            94 net/sched/sch_codel.c 	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
backlog           372 net/sched/sch_drr.c 	sch->qstats.backlog += len;
backlog           446 net/sched/sch_drr.c 	sch->qstats.backlog = 0;
backlog           277 net/sched/sch_dsmark.c 	sch->qstats.backlog += len;
backlog           410 net/sched/sch_dsmark.c 	sch->qstats.backlog = 0;
backlog           448 net/sched/sch_etf.c 	sch->qstats.backlog = 0;
backlog            21 net/sched/sch_fifo.c 	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
backlog            44 net/sched/sch_fifo.c 	prev_backlog = sch->qstats.backlog;
backlog            50 net/sched/sch_fifo.c 	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
backlog           627 net/sched/sch_fq.c 	sch->qstats.backlog = 0;
backlog           180 net/sched/sch_fq_codel.c 	sch->qstats.backlog -= len;
backlog           221 net/sched/sch_fq_codel.c 	prev_backlog = sch->qstats.backlog;
backlog           234 net/sched/sch_fq_codel.c 	prev_backlog -= sch->qstats.backlog;
backlog           269 net/sched/sch_fq_codel.c 		sch->qstats.backlog -= qdisc_pkt_len(skb);
backlog           304 net/sched/sch_fq_codel.c 	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
backlog           352 net/sched/sch_fq_codel.c 	sch->qstats.backlog = 0;
backlog           652 net/sched/sch_fq_codel.c 		qs.backlog = q->backlogs[idx];
backlog           705 net/sched/sch_generic.c 			q->backlog = 0;
backlog           928 net/sched/sch_generic.c 	qdisc->qstats.backlog = 0;
backlog            39 net/sched/sch_gred.c 	u32		backlog;	/* bytes on the virtualQ */
backlog           117 net/sched/sch_gred.c 		return sch->qstats.backlog;
backlog           119 net/sched/sch_gred.c 		return q->backlog;
backlog           181 net/sched/sch_gred.c 			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
backlog           247 net/sched/sch_gred.c 		q->backlog += qdisc_pkt_len(skb);
backlog           275 net/sched/sch_gred.c 			q->backlog -= qdisc_pkt_len(skb);
backlog           278 net/sched/sch_gred.c 				if (!sch->qstats.backlog)
backlog           281 net/sched/sch_gred.c 				if (!q->backlog)
backlog           306 net/sched/sch_gred.c 		q->backlog = 0;
backlog           344 net/sched/sch_gred.c 			opt.set.tab[i].backlog = &q->backlog;
backlog           380 net/sched/sch_gred.c 		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
backlog           386 net/sched/sch_gred.c 		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
backlog           503 net/sched/sch_gred.c 	if (q->backlog == 0)
backlog           813 net/sched/sch_gred.c 		opt.backlog	= gred_backlog(table, q, sch);
backlog          1324 net/sched/sch_hfsc.c 	qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
backlog          1487 net/sched/sch_hfsc.c 	sch->qstats.backlog = 0;
backlog          1572 net/sched/sch_hfsc.c 	sch->qstats.backlog += len;
backlog           405 net/sched/sch_hhf.c 	prev_backlog = sch->qstats.backlog;
backlog           414 net/sched/sch_hhf.c 	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
backlog           564 net/sched/sch_hhf.c 	prev_backlog = sch->qstats.backlog;
backlog           571 net/sched/sch_hhf.c 				  prev_backlog - sch->qstats.backlog);
backlog           613 net/sched/sch_htb.c 	sch->qstats.backlog += len;
backlog           970 net/sched/sch_htb.c 	sch->qstats.backlog = 0;
backlog          1131 net/sched/sch_htb.c 		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
backlog           162 net/sched/sch_mq.c 			sch->qstats.backlog	+= qdisc->qstats.backlog;
backlog           419 net/sched/sch_mqprio.c 			sch->qstats.backlog	+= qdisc->qstats.backlog;
backlog           113 net/sched/sch_pie.c 	if (sch->qstats.backlog < 2 * mtu)
backlog           268 net/sched/sch_pie.c 	int qlen = sch->qstats.backlog;	/* current queue size in bytes */
backlog           333 net/sched/sch_pie.c 	u32 qlen = sch->qstats.backlog;	/* queue size in bytes */
backlog            92 net/sched/sch_plug.c 	if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
backlog            88 net/sched/sch_prio.c 		sch->qstats.backlog += len;
backlog           138 net/sched/sch_prio.c 	sch->qstats.backlog = 0;
backlog          1240 net/sched/sch_qfq.c 	sch->qstats.backlog += len;
backlog          1465 net/sched/sch_qfq.c 	sch->qstats.backlog = 0;
backlog            66 net/sched/sch_red.c 				     child->qstats.backlog);
backlog           143 net/sched/sch_red.c 	sch->qstats.backlog = 0;
backlog           456 net/sched/sch_sfb.c 	sch->qstats.backlog = 0;
backlog           581 net/sched/sch_sfb.c 	sch->qstats.backlog = q->qdisc->qstats.backlog;
backlog           109 net/sched/sch_sfq.c 	unsigned int    backlog;
backlog           308 net/sched/sch_sfq.c 		slot->backlog -= len;
backlog           374 net/sched/sch_sfq.c 		slot->backlog = 0; /* should already be 0 anyway... */
backlog           381 net/sched/sch_sfq.c 							slot->backlog);
backlog           432 net/sched/sch_sfq.c 		sch->qstats.backlog -= delta;
backlog           433 net/sched/sch_sfq.c 		slot->backlog -= delta;
backlog           443 net/sched/sch_sfq.c 	slot->backlog += qdisc_pkt_len(skb);
backlog           504 net/sched/sch_sfq.c 	slot->backlog -= qdisc_pkt_len(skb);
backlog           556 net/sched/sch_sfq.c 		slot->backlog = 0;
backlog           587 net/sched/sch_sfq.c 							slot->backlog);
backlog           588 net/sched/sch_sfq.c 		slot->backlog += qdisc_pkt_len(skb);
backlog           875 net/sched/sch_sfq.c 		qs.backlog = slot->backlog;
backlog            85 net/sched/sch_skbprio.c 		q->qstats[prio].backlog += qdisc_pkt_len(skb);
backlog           108 net/sched/sch_skbprio.c 	q->qstats[prio].backlog += qdisc_pkt_len(skb);
backlog           117 net/sched/sch_skbprio.c 	q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);
backlog           152 net/sched/sch_skbprio.c 	q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);
backlog           216 net/sched/sch_skbprio.c 	sch->qstats.backlog = 0;
backlog           199 net/sched/sch_tbf.c 	sch->qstats.backlog += len;
backlog           274 net/sched/sch_tbf.c 	sch->qstats.backlog = 0;
backlog           446 net/sched/sch_tbf.c 	sch->qstats.backlog = q->qdisc->qstats.backlog;
backlog          8353 net/sctp/socket.c static int sctp_listen_start(struct sock *sk, int backlog)
backlog          8394 net/sctp/socket.c 	sk->sk_max_ack_backlog = backlog;
backlog          8412 net/sctp/socket.c int sctp_inet_listen(struct socket *sock, int backlog)
backlog          8418 net/sctp/socket.c 	if (unlikely(backlog < 0))
backlog          8434 net/sctp/socket.c 	if (!backlog) {
backlog          8448 net/sctp/socket.c 		sk->sk_max_ack_backlog = backlog;
backlog          8450 net/sctp/socket.c 		err = sctp_listen_start(sk, backlog);
backlog          1395 net/smc/af_smc.c static int smc_listen(struct socket *sock, int backlog)
backlog          1411 net/smc/af_smc.c 		sk->sk_max_ack_backlog = backlog;
backlog          1421 net/smc/af_smc.c 	rc = kernel_listen(smc->clcsock, backlog);
backlog          1424 net/smc/af_smc.c 	sk->sk_max_ack_backlog = backlog;
backlog          1667 net/socket.c   int __sys_listen(int fd, int backlog)
backlog          1676 net/socket.c   		if ((unsigned int)backlog > somaxconn)
backlog          1677 net/socket.c   			backlog = somaxconn;
backlog          1679 net/socket.c   		err = security_socket_listen(sock, backlog);
backlog          1681 net/socket.c   			err = sock->ops->listen(sock, backlog);
backlog          1688 net/socket.c   SYSCALL_DEFINE2(listen, int, fd, int, backlog)
backlog          1690 net/socket.c   	return __sys_listen(fd, backlog);
backlog          3591 net/socket.c   int kernel_listen(struct socket *sock, int backlog)
backlog          3593 net/socket.c   	return sock->ops->listen(sock, backlog);
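Read from the bottom up, the listen entries above form one call chain: userspace (or kernel_listen) passes a backlog hint, __sys_listen in net/socket.c clamps it to net.core.somaxconn and runs the LSM hook, and the protocol's ->listen (inet_listen, unix_listen, and the rest) typically stores the result in sk_max_ack_backlog. For reference, a plain POSIX caller of that path; the address and backlog value are arbitrary:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(0);		/* any free port */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}

	/* the backlog argument is only a hint; values above somaxconn are clamped */
	if (listen(fd, 128) < 0) {
		perror("listen");
		return 1;
	}

	close(fd);
	return 0;
}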
backlog           154 net/sunrpc/stats.c 	ktime_t backlog, execute, now;
backlog           170 net/sunrpc/stats.c 	backlog = 0;
backlog           172 net/sunrpc/stats.c 		backlog = ktime_sub(req->rq_xtime, task->tk_start);
backlog           173 net/sunrpc/stats.c 		op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);
backlog           185 net/sunrpc/stats.c 	trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);
backlog          1479 net/sunrpc/xprt.c 	xprt->stat.bklog_u += xprt->backlog.qlen;
backlog          1534 net/sunrpc/xprt.c 	rpc_sleep_on(&xprt->backlog, task, NULL);
backlog          1539 net/sunrpc/xprt.c 	if (rpc_wake_up_next(&xprt->backlog) == NULL)
backlog          1551 net/sunrpc/xprt.c 		rpc_sleep_on(&xprt->backlog, task, NULL);
backlog          1873 net/sunrpc/xprt.c 	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
backlog          1943 net/sunrpc/xprt.c 	rpc_destroy_wait_queue(&xprt->backlog);
backlog           577 net/sunrpc/xprtrdma/transport.c 	rpc_sleep_on(&xprt->backlog, task, NULL);
backlog           595 net/sunrpc/xprtrdma/transport.c 	if (unlikely(!rpc_wake_up_next(&xprt->backlog)))
backlog           164 net/tipc/link.c 	} backlog[5];
backlog           864 net/tipc/link.c 		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
backlog           907 net/tipc/link.c 		l->backlog[imp].len = 0;
backlog           908 net/tipc/link.c 		l->backlog[imp].target_bskb = NULL;
backlog           964 net/tipc/link.c 	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
backlog          1003 net/tipc/link.c 		tskb = &l->backlog[imp].target_bskb;
backlog          1012 net/tipc/link.c 			l->backlog[imp].len++;
backlog          1017 net/tipc/link.c 		l->backlog[imp].target_bskb = NULL;
backlog          1018 net/tipc/link.c 		l->backlog[imp].len += skb_queue_len(list);
backlog          1045 net/tipc/link.c 		l->backlog[imp].len--;
backlog          1046 net/tipc/link.c 		if (unlikely(skb == l->backlog[imp].target_bskb))
backlog          1047 net/tipc/link.c 			l->backlog[imp].target_bskb = NULL;
backlog          2302 net/tipc/link.c 	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
backlog          2303 net/tipc/link.c 	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
backlog          2304 net/tipc/link.c 	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
backlog          2305 net/tipc/link.c 	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
backlog          2306 net/tipc/link.c 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
backlog          2746 net/tipc/link.c 			       l->backlog[TIPC_LOW_IMPORTANCE].len,
backlog          2747 net/tipc/link.c 			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
backlog          2748 net/tipc/link.c 			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
backlog          2749 net/tipc/link.c 			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
backlog          2750 net/tipc/link.c 			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
backlog           607 net/unix/af_unix.c static int unix_listen(struct socket *sock, int backlog)
backlog           623 net/unix/af_unix.c 	if (backlog > sk->sk_max_ack_backlog)
backlog           625 net/unix/af_unix.c 	sk->sk_max_ack_backlog	= backlog;
backlog          1332 net/vmw_vsock/af_vsock.c static int vsock_listen(struct socket *sock, int backlog)
backlog          1359 net/vmw_vsock/af_vsock.c 	sk->sk_max_ack_backlog = backlog;
backlog           489 net/x25/af_x25.c static int x25_listen(struct socket *sock, int backlog)
backlog           497 net/x25/af_x25.c 		sk->sk_max_ack_backlog = backlog;
backlog           888 security/apparmor/lsm.c static int apparmor_socket_listen(struct socket *sock, int backlog)
backlog           895 security/apparmor/lsm.c 			 listen_perm(sock, backlog),
backlog          2001 security/security.c int security_socket_listen(struct socket *sock, int backlog)
backlog          2003 security/security.c 	return call_int_hook(socket_listen, 0, sock, backlog);
backlog          4806 security/selinux/hooks.c static int selinux_socket_listen(struct socket *sock, int backlog)
backlog           439 security/tomoyo/tomoyo.c static int tomoyo_socket_listen(struct socket *sock, int backlog)
backlog            42 tools/include/uapi/linux/pkt_sched.h 	__u32	backlog;
backlog           331 tools/include/uapi/linux/pkt_sched.h 	__u32		backlog;
backlog            32 tools/perf/ui/gtk/helpline.c 	static int backlog;
backlog            34 tools/perf/ui/gtk/helpline.c 	ret = vscnprintf(ui_helpline__current + backlog,
backlog            35 tools/perf/ui/gtk/helpline.c 			 sizeof(ui_helpline__current) - backlog, fmt, ap);
backlog            36 tools/perf/ui/gtk/helpline.c 	backlog += ret;
backlog            40 tools/perf/ui/gtk/helpline.c 	if (ptr && (ptr - ui_helpline__current) <= backlog) {
backlog            43 tools/perf/ui/gtk/helpline.c 		backlog = 0;
backlog            34 tools/perf/ui/tui/helpline.c 	static int backlog;
backlog            37 tools/perf/ui/tui/helpline.c 	ret = vscnprintf(ui_helpline__last_msg + backlog,
backlog            38 tools/perf/ui/tui/helpline.c 			sizeof(ui_helpline__last_msg) - backlog, format, ap);
backlog            39 tools/perf/ui/tui/helpline.c 	backlog += ret;
backlog            43 tools/perf/ui/tui/helpline.c 	if (ui_helpline__last_msg[backlog - 1] == '\n') {
backlog            46 tools/perf/ui/tui/helpline.c 		backlog = 0;
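Both perf helpline entries above use a static backlog as a byte offset into a static buffer: each vscnprintf appends at that offset, and the offset resets once a complete line has been produced. A self-contained userspace model of the same accumulation, with invented names (line, helpline_printf) rather than the perf UI API:

#include <stdarg.h>
#include <stdio.h>

static char line[512];
static size_t backlog;

static void helpline_vprintf(const char *fmt, va_list ap)
{
	size_t room;
	int ret;

	if (backlog >= sizeof(line) - 1)
		backlog = 0;			/* buffer full: start a fresh line */

	room = sizeof(line) - backlog;
	ret = vsnprintf(line + backlog, room, fmt, ap);
	if (ret < 0)
		return;

	/* vsnprintf reports the would-be length; clamp to what actually fit */
	backlog += (size_t)ret < room ? (size_t)ret : room - 1;

	if (backlog && line[backlog - 1] == '\n') {
		fputs(line, stdout);		/* flush the completed line */
		backlog = 0;
	}
}

static void helpline_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	helpline_vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	helpline_printf("building ");
	helpline_printf("id index");
	helpline_printf(" ... done\n");		/* the newline triggers the flush */
	return 0;
}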