/linux-4.4.14/drivers/crypto/ccp/ |
D | ccp-crypto-main.c |
   47  struct list_head *backlog;  member
   89  struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)  in ccp_crypto_cmd_complete() argument
   94  *backlog = NULL;  in ccp_crypto_cmd_complete()
  113  if (req_queue.backlog != &req_queue.cmds) {  in ccp_crypto_cmd_complete()
  115  if (req_queue.backlog == &crypto_cmd->entry)  in ccp_crypto_cmd_complete()
  116  req_queue.backlog = crypto_cmd->entry.next;  in ccp_crypto_cmd_complete()
  118  *backlog = container_of(req_queue.backlog,  in ccp_crypto_cmd_complete()
  120  req_queue.backlog = req_queue.backlog->next;  in ccp_crypto_cmd_complete()
  123  if (req_queue.backlog == &crypto_cmd->entry)  in ccp_crypto_cmd_complete()
  124  req_queue.backlog = crypto_cmd->entry.next;  in ccp_crypto_cmd_complete()
  [all …]
|
D | ccp-dev.c |
  114  list_add_tail(&cmd->entry, &ccp->backlog);  in ccp_enqueue_cmd()
  174  struct ccp_cmd *backlog = NULL;  in ccp_dequeue_cmd() local
  199  if (!list_empty(&ccp->backlog)) {  in ccp_dequeue_cmd()
  200  backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,  in ccp_dequeue_cmd()
  202  list_del(&backlog->entry);  in ccp_dequeue_cmd()
  207  if (backlog) {  in ccp_dequeue_cmd()
  208  INIT_WORK(&backlog->work, ccp_do_cmd_backlog);  in ccp_dequeue_cmd()
  209  schedule_work(&backlog->work);  in ccp_dequeue_cmd()
  304  INIT_LIST_HEAD(&ccp->backlog);  in ccp_alloc_struct()
  513  while (!list_empty(&ccp->backlog)) {  in ccp_destroy()
  [all …]
|
D | ccp-dev.h | 217 struct list_head backlog; member
|
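The ccp-dev.c hits above form one pattern: a command that cannot be issued immediately is appended to ccp->backlog, and ccp_dequeue_cmd() later pops the oldest one and defers its processing to a work item. Below is a minimal userspace sketch of that flow, assuming simplified types and a direct function call standing in for schedule_work(); the names are illustrative, not the driver's.

#include <stdio.h>
#include <stdlib.h>

struct cmd {
    int id;
    struct cmd *next;
};

static struct cmd *backlog_head, **backlog_tail = &backlog_head;

/* list_add_tail() analogue: append a command to the backlog */
static void enqueue_backlog(struct cmd *c)
{
    c->next = NULL;
    *backlog_tail = c;
    backlog_tail = &c->next;
}

/* stand-in for schedule_work(&backlog->work) / ccp_do_cmd_backlog() */
static void do_cmd_backlog(struct cmd *c)
{
    printf("processing backlogged cmd %d\n", c->id);
    free(c);
}

/* ccp_dequeue_cmd() analogue: pop the first backlogged command, if any */
static void dequeue_cmd(void)
{
    struct cmd *c = backlog_head;

    if (c) {
        backlog_head = c->next;
        if (!backlog_head)
            backlog_tail = &backlog_head;
        do_cmd_backlog(c);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct cmd *c = malloc(sizeof(*c));
        c->id = i;
        enqueue_backlog(c);
    }
    dequeue_cmd();    /* cmd 0 */
    dequeue_cmd();    /* cmd 1 */
    return 0;
}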
/linux-4.4.14/tools/perf/ui/tui/ |
D | helpline.c |
  32  static int backlog;  in tui_helpline__show() local
  35  ret = vscnprintf(ui_helpline__last_msg + backlog,  in tui_helpline__show()
  36  sizeof(ui_helpline__last_msg) - backlog, format, ap);  in tui_helpline__show()
  37  backlog += ret;  in tui_helpline__show()
  41  if (ui_helpline__last_msg[backlog - 1] == '\n') {  in tui_helpline__show()
  44  backlog = 0;  in tui_helpline__show()
|
/linux-4.4.14/tools/perf/ui/gtk/ |
D | helpline.c |
  31  static int backlog;  in gtk_helpline_show() local
  33  ret = vscnprintf(ui_helpline__current + backlog,  in gtk_helpline_show()
  34  sizeof(ui_helpline__current) - backlog, fmt, ap);  in gtk_helpline_show()
  35  backlog += ret;  in gtk_helpline_show()
  39  if (ptr && (ptr - ui_helpline__current) <= backlog) {  in gtk_helpline_show()
  42  backlog = 0;  in gtk_helpline_show()
|
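Both helpline implementations above accumulate printf-style fragments into a static buffer at offset backlog and flush once the message ends in '\n'. A self-contained model of that technique, assuming a vscnprintf() lookalike built on vsnprintf() (the kernel's vscnprintf() returns the number of characters actually stored):

#include <stdarg.h>
#include <stdio.h>

static char last_msg[256];

/* vsnprintf() wrapper that, like the kernel's vscnprintf(), returns the
 * number of characters actually stored (excluding the terminating NUL) */
static int my_vscnprintf(char *buf, size_t size, const char *fmt, va_list ap)
{
    int ret = vsnprintf(buf, size, fmt, ap);

    if (ret < 0)
        return 0;
    return (size_t)ret >= size ? (int)size - 1 : ret;
}

static int helpline_show(const char *fmt, ...)
{
    static int backlog;    /* bytes already buffered for this message */
    va_list ap;
    int ret;

    va_start(ap, fmt);
    ret = my_vscnprintf(last_msg + backlog, sizeof(last_msg) - backlog, fmt, ap);
    va_end(ap);
    backlog += ret;

    if (backlog > 0 && last_msg[backlog - 1] == '\n') {
        fputs(last_msg, stdout);    /* message complete: display and reset */
        backlog = 0;
    }
    return ret;
}

int main(void)
{
    helpline_show("loading %s", "perf.data");  /* buffered, no newline yet */
    helpline_show(" ... done\n");              /* completes and flushes */
    return 0;
}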
/linux-4.4.14/net/sched/ |
D | sch_gred.c |
   40  u32 backlog; /* bytes on the virtualQ */  member
  118  return sch->qstats.backlog;  in gred_backlog()
  120  return q->backlog;  in gred_backlog()
  168  if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=  in gred_enqueue()
  234  q->backlog += qdisc_pkt_len(skb);  in gred_enqueue()
  262  q->backlog -= qdisc_pkt_len(skb);  in gred_dequeue()
  265  if (!sch->qstats.backlog)  in gred_dequeue()
  268  if (!q->backlog)  in gred_dequeue()
  294  q->backlog -= len;  in gred_drop()
  298  if (!sch->qstats.backlog)  in gred_drop()
  [all …]
|
D | sch_sfq.c |
  112  unsigned int backlog;  member
  307  slot->backlog -= len;  in sfq_drop()
  374  slot->backlog = 0; /* should already be 0 anyway... */  in sfq_enqueue()
  381  slot->backlog);  in sfq_enqueue()
  432  sch->qstats.backlog -= delta;  in sfq_enqueue()
  433  slot->backlog -= delta;  in sfq_enqueue()
  442  slot->backlog += qdisc_pkt_len(skb);  in sfq_enqueue()
  501  slot->backlog -= qdisc_pkt_len(skb);  in sfq_dequeue()
  553  slot->backlog = 0;  in sfq_rehash()
  584  slot->backlog);  in sfq_rehash()
  [all …]
|
D | sch_hhf.c |
  375  prev_backlog = sch->qstats.backlog;  in hhf_qdisc_drop()
  377  return prev_backlog - sch->qstats.backlog;  in hhf_qdisc_drop()
  413  prev_backlog = sch->qstats.backlog;  in hhf_enqueue()
  422  qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);  in hhf_enqueue()
  582  prev_backlog = sch->qstats.backlog;  in hhf_change()
  589  prev_backlog - sch->qstats.backlog);  in hhf_change()
|
D | sch_red.c | 67 child->qstats.backlog); in red_enqueue() 214 q->qdisc->qstats.backlog); in red_change() 272 sch->qstats.backlog = q->qdisc->qstats.backlog; in red_dump()
|
D | sch_fq_codel.c |
  170  prev_backlog = sch->qstats.backlog;  in fq_codel_qdisc_drop()
  172  return prev_backlog - sch->qstats.backlog;  in fq_codel_qdisc_drop()
  206  prev_backlog = sch->qstats.backlog;  in fq_codel_enqueue()
  215  qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);  in fq_codel_enqueue()
  264  prev_backlog = sch->qstats.backlog;  in fq_codel_dequeue()
  588  qs.backlog = q->backlogs[idx];  in fq_codel_dump_class_stats()
|
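sch_hhf.c and sch_fq_codel.c above share the prev_backlog idiom: snapshot sch->qstats.backlog, perform the drop, then report the byte difference up the qdisc tree via qdisc_tree_reduce_backlog(). A toy model of that accounting (the 1500-byte packet size is illustrative):

#include <stdio.h>

struct qstats { unsigned int backlog; };    /* bytes currently queued */

static struct qstats q = { .backlog = 5000 };

/* stand-in for the qdisc's drop callback: frees one 1500-byte packet */
static void drop_one(void)
{
    q.backlog -= 1500;
}

int main(void)
{
    unsigned int prev_backlog = q.backlog;

    drop_one();
    /* qdisc_tree_reduce_backlog() analogue: report the bytes freed upward */
    printf("freed %u bytes\n", prev_backlog - q.backlog);
    return 0;
}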
D | sch_mqprio.c | 239 sch->qstats.backlog += qdisc->qstats.backlog; in mqprio_dump() 350 qstats.backlog += qdisc->qstats.backlog; in mqprio_dump_class_stats()
|
D | sch_mq.c | 115 sch->qstats.backlog += qdisc->qstats.backlog; in mq_dump()
|
D | sch_pie.c | 119 if (sch->qstats.backlog < 2 * mtu) in drop_early() 249 int qlen = sch->qstats.backlog; /* current queue size in bytes */ in pie_process_dequeue() 314 u32 qlen = sch->qstats.backlog; /* queue size in bytes */ in calculate_probability()
|
D | sch_tbf.c | 405 q->qdisc->qstats.backlog); in tbf_change() 459 sch->qstats.backlog = q->qdisc->qstats.backlog; in tbf_dump()
|
D | sch_drr.c | 56 unsigned int backlog = cl->qdisc->qstats.backlog; in drr_purge_queue() local 59 qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); in drr_purge_queue()
|
D | sch_htb.c |
   960  sch->qstats.backlog -= len;  in htb_drop()
   995  sch->qstats.backlog = 0;  in htb_reset()
  1291  unsigned int backlog = cl->un.leaf.q->qstats.backlog;  in htb_delete() local
  1294  qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);  in htb_delete()
  1428  unsigned int backlog = parent->un.leaf.q->qstats.backlog;  in htb_change_class() local
  1432  qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);  in htb_change_class()
|
D | sch_hfsc.c | 898 unsigned int backlog = cl->qdisc->qstats.backlog; in hfsc_purge_queue() local 901 qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); in hfsc_purge_queue() 1370 cl->qstats.backlog = cl->qdisc->qstats.backlog; in hfsc_dump_class_stats() 1565 sch->qstats.backlog = 0; in hfsc_dump_qdisc() 1568 sch->qstats.backlog += cl->qdisc->qstats.backlog; in hfsc_dump_qdisc()
|
D | sch_plug.c | 93 if (likely(sch->qstats.backlog + skb->len <= q->limit)) { in plug_enqueue()
|
D | sch_multiq.c | 222 child->qstats.backlog); in multiq_tune() 244 old->qstats.backlog); in multiq_tune()
|
D | sch_prio.c | 194 qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog); in prio_tune() 215 old->qstats.backlog); in prio_tune()
|
D | sch_sfb.c | 514 q->qdisc->qstats.backlog); in sfb_change() 566 sch->qstats.backlog = q->qdisc->qstats.backlog; in sfb_dump()
|
D | sch_fifo.c | 24 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) in bfifo_enqueue()
|
D | sch_qfq.c | 223 unsigned int backlog = cl->qdisc->qstats.backlog; in qfq_purge_queue() local 226 qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); in qfq_purge_queue()
|
D | sch_cbq.c | 1912 unsigned int qlen, backlog; in cbq_delete() local 1920 backlog = cl->q->qstats.backlog; in cbq_delete() 1922 qdisc_tree_reduce_backlog(cl->q, qlen, backlog); in cbq_delete()
|
D | sch_dsmark.c | 400 sch->qstats.backlog = 0; in dsmark_reset()
|
D | sch_generic.c | 532 qdisc->qstats.backlog = 0; in pfifo_fast_reset()
|
D | sch_api.c | 778 sch->qstats.backlog -= len; in qdisc_tree_reduce_backlog()
|
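The invariant across this directory is that sch->qstats.backlog counts queued bytes: enqueue paths add qdisc_pkt_len(skb) after an admission test (see bfifo_enqueue() in sch_fifo.c above), and dequeue and drop paths subtract it. A compilable sketch of the byte-based admission test, with made-up sizes:

#include <stdbool.h>
#include <stdio.h>

struct sch { unsigned int backlog, limit; };    /* both in bytes */

/* bfifo_enqueue() analogue with qdisc_qstats_backlog_inc() folded in */
static bool bfifo_enqueue(struct sch *s, unsigned int pkt_len)
{
    if (s->backlog + pkt_len <= s->limit) {    /* admit */
        s->backlog += pkt_len;
        return true;
    }
    return false;    /* would exceed the byte limit: drop */
}

/* qdisc_qstats_backlog_dec() analogue on dequeue */
static void bfifo_dequeue(struct sch *s, unsigned int pkt_len)
{
    s->backlog -= pkt_len;
}

int main(void)
{
    struct sch s = { .backlog = 0, .limit = 3000 };

    printf("%d\n", bfifo_enqueue(&s, 1500));    /* 1: fits */
    printf("%d\n", bfifo_enqueue(&s, 1500));    /* 1: fits exactly */
    printf("%d\n", bfifo_enqueue(&s, 1));       /* 0: over the limit */
    bfifo_dequeue(&s, 1500);
    return 0;
}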
/linux-4.4.14/net/core/ |
D | gen_stats.c | 231 qstats->backlog += qcpu->backlog; in __gnet_stats_copy_queue_cpu() 247 qstats->backlog = q->backlog; in __gnet_stats_copy_queue() 282 d->tc_stats.backlog = qstats.backlog; in gnet_stats_copy_queue()
|
D | dev.c |
  3416  ____napi_schedule(sd, &sd->backlog);  in rps_trigger_softirq()
  3515  if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {  in enqueue_to_backlog()
  3517  ____napi_schedule(sd, &sd->backlog);  in enqueue_to_backlog()
  4545  struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);  in process_backlog()
  7783  sd->backlog.poll = process_backlog;  in net_dev_init()
  7784  sd->backlog.weight = weight_p;  in net_dev_init()
|
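enqueue_to_backlog() in dev.c above schedules the per-CPU backlog NAPI poller at most once: only the packet that finds NAPI_STATE_SCHED clear triggers ____napi_schedule(). A userspace analogue of that schedule-once bit, using C11 atomics in place of the kernel's bitops (a sketch, not the kernel path):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag sched_state = ATOMIC_FLAG_INIT;    /* NAPI_STATE_SCHED */

static void enqueue_to_backlog(int pkt)
{
    /* __test_and_set_bit(NAPI_STATE_SCHED, ...) analogue */
    if (!atomic_flag_test_and_set(&sched_state))
        printf("pkt %d: backlog poller scheduled\n", pkt);
    else
        printf("pkt %d: poller already pending\n", pkt);
}

static void process_backlog(void)
{
    atomic_flag_clear(&sched_state);    /* poll done, allow rescheduling */
}

int main(void)
{
    enqueue_to_backlog(1);    /* schedules */
    enqueue_to_backlog(2);    /* already scheduled */
    process_backlog();
    enqueue_to_backlog(3);    /* schedules again */
    return 0;
}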
D | sock.c | 2214 int sock_no_listen(struct socket *sock, int backlog) in sock_no_listen() argument
|
/linux-4.4.14/drivers/crypto/qce/ |
D | core.c | 82 struct crypto_async_request *async_req, *backlog; in qce_handle_queue() local 97 backlog = crypto_get_backlog(&qce->queue); in qce_handle_queue() 107 if (backlog) { in qce_handle_queue() 109 backlog->complete(backlog, -EINPROGRESS); in qce_handle_queue()
|
/linux-4.4.14/include/net/ |
D | red.h | 280 unsigned int backlog) in red_calc_qavg_no_idle_time() argument 291 return v->qavg + (backlog - (v->qavg >> p->Wlog)); in red_calc_qavg_no_idle_time() 296 unsigned int backlog) in red_calc_qavg() argument 299 return red_calc_qavg_no_idle_time(p, v, backlog); in red_calc_qavg()
|
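red_calc_qavg_no_idle_time() above is a fixed-point EWMA: the stored qavg is the true average scaled by 2^Wlog, so qavg += backlog - (qavg >> Wlog) computes avg += 2^-Wlog * (backlog - avg) in integer arithmetic. A small demonstration of that update (the weight and samples here are arbitrary):

#include <stdio.h>

int main(void)
{
    unsigned int Wlog = 3;             /* weight = 1/8 */
    unsigned long qavg = 0;            /* average, scaled by 2^Wlog */
    unsigned int samples[] = { 1000, 1000, 4000, 0 };

    for (int i = 0; i < 4; i++) {
        unsigned int backlog = samples[i];

        /* red_calc_qavg_no_idle_time() update step */
        qavg += backlog - (qavg >> Wlog);
        printf("backlog=%4u  avg=%lu bytes\n", backlog, qavg >> Wlog);
    }
    return 0;
}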
D | inet_common.h | 30 int inet_listen(struct socket *sock, int backlog);
|
D | sch_generic.h | 545 sch->qstats.backlog -= qdisc_pkt_len(skb); in qdisc_qstats_backlog_dec() 551 sch->qstats.backlog += qdisc_pkt_len(skb); in qdisc_qstats_backlog_inc() 699 sch->qstats.backlog = 0; in qdisc_reset_queue() 711 qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); in qdisc_replace()
|
D | codel.h | 244 sch->qstats.backlog -= qdisc_pkt_len(skb); in codel_should_drop() 250 sch->qstats.backlog <= params->mtu) { in codel_should_drop()
|
D | inet_connection_sock.h | 317 int inet_csk_listen_start(struct sock *sk, int backlog);
|
/linux-4.4.14/drivers/net/ipvlan/ |
D | ipvlan_core.c |
  205  spin_lock_bh(&port->backlog.lock);  in ipvlan_process_multicast()
  206  skb_queue_splice_tail_init(&port->backlog, &list);  in ipvlan_process_multicast()
  207  spin_unlock_bh(&port->backlog.lock);  in ipvlan_process_multicast()
  466  spin_lock(&port->backlog.lock);  in ipvlan_multicast_enqueue()
  467  if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {  in ipvlan_multicast_enqueue()
  468  __skb_queue_tail(&port->backlog, skb);  in ipvlan_multicast_enqueue()
  469  spin_unlock(&port->backlog.lock);  in ipvlan_multicast_enqueue()
  472  spin_unlock(&port->backlog.lock);  in ipvlan_multicast_enqueue()
|
D | ipvlan.h | 97 struct sk_buff_head backlog; member
|
D | ipvlan_main.c | 57 skb_queue_head_init(&port->backlog); in ipvlan_port_create() 79 __skb_queue_purge(&port->backlog); in ipvlan_port_destroy()
|
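ipvlan_multicast_enqueue() above takes the backlog queue lock, enqueues only while the queue is shorter than IPVLAN_QBACKLOG_LIMIT, and otherwise lets the caller drop the packet. A pthread model of that bounded, lock-protected enqueue (the limit value is invented; compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define QBACKLOG_LIMIT 4    /* IPVLAN_QBACKLOG_LIMIT stand-in */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int backlog_len;

static bool multicast_enqueue(int pkt)
{
    bool queued = false;

    pthread_mutex_lock(&lock);
    if (backlog_len < QBACKLOG_LIMIT) {    /* room in the backlog */
        backlog_len++;                     /* __skb_queue_tail() here */
        queued = true;
    }
    pthread_mutex_unlock(&lock);
    return queued;    /* caller drops the packet when false */
}

int main(void)
{
    for (int i = 0; i < 6; i++)
        printf("pkt %d: %s\n", i, multicast_enqueue(i) ? "queued" : "dropped");
    return 0;
}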
/linux-4.4.14/include/rdma/ |
D | iw_cm.h | 123 int backlog); 181 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
|
D | rdma_cm.h | 285 int rdma_listen(struct rdma_cm_id *id, int backlog);
|
/linux-4.4.14/drivers/crypto/ |
D | mxs-dcp.c |
  347  struct crypto_async_request *backlog;  in dcp_chan_thread_aes() local
  356  backlog = crypto_get_backlog(&sdcp->queue[chan]);  in dcp_chan_thread_aes()
  360  if (backlog)  in dcp_chan_thread_aes()
  361  backlog->complete(backlog, -EINPROGRESS);  in dcp_chan_thread_aes()
  640  struct crypto_async_request *backlog;  in dcp_chan_thread_sha() local
  652  backlog = crypto_get_backlog(&sdcp->queue[chan]);  in dcp_chan_thread_sha()
  656  if (backlog)  in dcp_chan_thread_sha()
  657  backlog->complete(backlog, -EINPROGRESS);  in dcp_chan_thread_sha()
|
D | s5p-sss.c | 466 struct crypto_async_request *async_req, *backlog; in s5p_tasklet_cb() local 471 backlog = crypto_get_backlog(&dev->queue); in s5p_tasklet_cb() 481 if (backlog) in s5p_tasklet_cb() 482 backlog->complete(backlog, -EINPROGRESS); in s5p_tasklet_cb()
|
D | bfin_crc.c | 282 struct crypto_async_request *async_req, *backlog; in bfin_crypto_crc_handle_queue() local 298 backlog = crypto_get_backlog(&crc->queue); in bfin_crypto_crc_handle_queue() 307 if (backlog) in bfin_crypto_crc_handle_queue() 308 backlog->complete(backlog, -EINPROGRESS); in bfin_crypto_crc_handle_queue()
|
D | mv_cesa.c | 603 struct crypto_async_request *backlog = NULL; in queue_manag() local 612 backlog = crypto_get_backlog(&cpg->queue); in queue_manag() 621 if (backlog) { in queue_manag() 622 backlog->complete(backlog, -EINPROGRESS); in queue_manag() 623 backlog = NULL; in queue_manag()
|
D | img-hash.c | 497 struct crypto_async_request *async_req, *backlog; in img_hash_handle_queue() local 512 backlog = crypto_get_backlog(&hdev->queue); in img_hash_handle_queue() 522 if (backlog) in img_hash_handle_queue() 523 backlog->complete(backlog, -EINPROGRESS); in img_hash_handle_queue()
|
D | omap-des.c | 592 struct crypto_async_request *async_req, *backlog; in omap_des_handle_queue() local 605 backlog = crypto_get_backlog(&dd->queue); in omap_des_handle_queue() 614 if (backlog) in omap_des_handle_queue() 615 backlog->complete(backlog, -EINPROGRESS); in omap_des_handle_queue()
|
D | omap-aes.c | 611 struct crypto_async_request *async_req, *backlog; in omap_aes_handle_queue() local 624 backlog = crypto_get_backlog(&dd->queue); in omap_aes_handle_queue() 633 if (backlog) in omap_aes_handle_queue() 634 backlog->complete(backlog, -EINPROGRESS); in omap_aes_handle_queue()
|
D | atmel-aes.c | 565 struct crypto_async_request *async_req, *backlog; in atmel_aes_handle_queue() local 578 backlog = crypto_get_backlog(&dd->queue); in atmel_aes_handle_queue() 587 if (backlog) in atmel_aes_handle_queue() 588 backlog->complete(backlog, -EINPROGRESS); in atmel_aes_handle_queue()
|
D | atmel-tdes.c | 591 struct crypto_async_request *async_req, *backlog; in atmel_tdes_handle_queue() local 604 backlog = crypto_get_backlog(&dd->queue); in atmel_tdes_handle_queue() 613 if (backlog) in atmel_tdes_handle_queue() 614 backlog->complete(backlog, -EINPROGRESS); in atmel_tdes_handle_queue()
|
D | atmel-sha.c | 832 struct crypto_async_request *async_req, *backlog; in atmel_sha_handle_queue() local 846 backlog = crypto_get_backlog(&dd->queue); in atmel_sha_handle_queue() 856 if (backlog) in atmel_sha_handle_queue() 857 backlog->complete(backlog, -EINPROGRESS); in atmel_sha_handle_queue()
|
D | sahara.c | 1039 struct crypto_async_request *backlog; in sahara_queue_manage() local 1046 backlog = crypto_get_backlog(&dev->queue); in sahara_queue_manage() 1050 if (backlog) in sahara_queue_manage() 1051 backlog->complete(backlog, -EINPROGRESS); in sahara_queue_manage()
|
D | omap-sham.c | 1016 struct crypto_async_request *async_req, *backlog; in omap_sham_handle_queue() local 1028 backlog = crypto_get_backlog(&dd->queue); in omap_sham_handle_queue() 1037 if (backlog) in omap_sham_handle_queue() 1038 backlog->complete(backlog, -EINPROGRESS); in omap_sham_handle_queue()
|
D | hifn_795x.c | 2137 struct crypto_async_request *async_req, *backlog; in hifn_process_queue() local 2144 backlog = crypto_get_backlog(&dev->queue); in hifn_process_queue() 2151 if (backlog) in hifn_process_queue() 2152 backlog->complete(backlog, -EINPROGRESS); in hifn_process_queue()
|
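Every driver above repeats one convention: a request accepted only onto the backlog (whose submitter got -EBUSY) has its completion callback invoked once with -EINPROGRESS when the driver promotes it to the active queue, telling the submitter it may stop throttling. A minimal illustration of that callback contract, with the struct simplified from crypto_async_request:

#include <errno.h>
#include <stdio.h>

struct async_request {
    void (*complete)(struct async_request *req, int err);
};

static void my_complete(struct async_request *req, int err)
{
    if (err == -EINPROGRESS)    /* promoted from backlog to active queue */
        printf("request %p now in progress, resume submitting\n", (void *)req);
    else
        printf("request %p finished: %d\n", (void *)req, err);
}

int main(void)
{
    struct async_request req = { .complete = my_complete };
    struct async_request *backlog = &req;    /* crypto_get_backlog() result */

    if (backlog)
        backlog->complete(backlog, -EINPROGRESS);  /* driver-side notify */
    req.complete(&req, 0);                         /* later: final completion */
    return 0;
}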
/linux-4.4.14/net/tipc/ |
D | link.c |
  655  lim = l->window + l->backlog[imp].limit;  in link_prepare_wakeup()
  657  if ((pnd[imp] + l->backlog[imp].len) >= lim)  in link_prepare_wakeup()
  680  l->backlog[TIPC_LOW_IMPORTANCE].len = 0;  in tipc_link_reset()
  681  l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;  in tipc_link_reset()
  682  l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;  in tipc_link_reset()
  683  l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;  in tipc_link_reset()
  684  l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;  in tipc_link_reset()
  727  if (unlikely(l->backlog[i].len >= l->backlog[i].limit))  in tipc_link_xmit()
  761  l->backlog[msg_importance(buf_msg(bskb))].len++;  in tipc_link_xmit()
  766  l->backlog[imp].len += skb_queue_len(list);  in tipc_link_xmit()
  [all …]
|
D | link.h | 192 } backlog[5]; member
|
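TIPC keeps a (len, limit) pair per importance level, as the link.h member above shows; tipc_link_xmit() rejects a message class once its backlog length reaches its limit, and tipc_link_reset() zeroes every counter. A compact model of that per-level accounting (the limits here are arbitrary, not TIPC's window-derived values):

#include <stdbool.h>
#include <stdio.h>

enum { LOW, MEDIUM, HIGH, CRITICAL, SYSTEM, LEVELS };

static struct { unsigned int len, limit; } backlog[LEVELS] = {
    [LOW] = { .limit = 2 },  [MEDIUM]   = { .limit = 4 },
    [HIGH] = { .limit = 6 }, [CRITICAL] = { .limit = 8 },
    [SYSTEM] = { .limit = 8 },
};

/* tipc_link_xmit() analogue: admit one message at this importance */
static bool link_xmit(int imp)
{
    if (backlog[imp].len >= backlog[imp].limit)
        return false;    /* link congested at this importance level */
    backlog[imp].len++;
    return true;
}

/* tipc_link_reset() analogue: zero every level's counter */
static void link_reset(void)
{
    for (int i = 0; i < LEVELS; i++)
        backlog[i].len = 0;
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        printf("LOW msg %d: %s\n", i, link_xmit(LOW) ? "queued" : "rejected");
    link_reset();
    return 0;
}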
/linux-4.4.14/drivers/crypto/marvell/ |
D | cesa.c | 42 struct crypto_async_request *req, *backlog; in mv_cesa_dequeue_req_unlocked() local 46 backlog = crypto_get_backlog(&cesa_dev->queue); in mv_cesa_dequeue_req_unlocked() 54 if (backlog) in mv_cesa_dequeue_req_unlocked() 55 backlog->complete(backlog, -EINPROGRESS); in mv_cesa_dequeue_req_unlocked()
|
/linux-4.4.14/drivers/infiniband/hw/cxgb3/ |
D | iwch_cm.h | 172 int backlog; member 218 int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
|
D | iwch_cm.c | 1986 int iwch_create_listen(struct iw_cm_id *cm_id, int backlog) in iwch_create_listen() argument 2010 ep->backlog = backlog; in iwch_create_listen()
|
/linux-4.4.14/include/uapi/linux/ |
D | gen_stats.h | 61 __u32 backlog; member
|
D | pkt_sched.h | 41 __u32 backlog; member 282 __u32 backlog; member
|
D | audit.h | 420 __u32 backlog; /* messages waiting in queue */ member
|
/linux-4.4.14/drivers/staging/rdma/amso1100/ |
D | c2_cm.c | 135 int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog) in c2_llp_service_create() argument 166 wr.backlog = cpu_to_be32(backlog); in c2_llp_service_create()
|
D | c2_provider.c | 631 static int c2_service_create(struct iw_cm_id *cm_id, int backlog) in c2_service_create() argument 636 err = c2_llp_service_create(cm_id, backlog); in c2_service_create()
|
D | c2.h | 527 extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
|
D | c2_wr.h | 1282 __be32 backlog; /* tradional tcp listen bl */ member
|
/linux-4.4.14/include/crypto/ |
D | algapi.h | 73 struct list_head *backlog; member 326 return queue->backlog == &queue->list ? NULL : in crypto_get_backlog() 327 container_of(queue->backlog, struct crypto_async_request, list); in crypto_get_backlog()
|
/linux-4.4.14/crypto/ |
D | algapi.c | 878 queue->backlog = &queue->list; in crypto_init_queue() 893 if (queue->backlog == &queue->list) in crypto_enqueue_request() 894 queue->backlog = &request->list; in crypto_enqueue_request() 914 if (queue->backlog != &queue->list) in crypto_dequeue_request() 915 queue->backlog = queue->backlog->next; in crypto_dequeue_request()
|
D | mcryptd.c | 156 struct crypto_async_request *req, *backlog; in mcryptd_queue_worker() local 173 backlog = crypto_get_backlog(&cpu_queue->queue); in mcryptd_queue_worker() 183 if (backlog) in mcryptd_queue_worker() 184 backlog->complete(backlog, -EINPROGRESS); in mcryptd_queue_worker()
|
D | cryptd.c | 137 struct crypto_async_request *req, *backlog; in cryptd_queue_worker() local 148 backlog = crypto_get_backlog(&cpu_queue->queue); in cryptd_queue_worker() 156 if (backlog) in cryptd_queue_worker() 157 backlog->complete(backlog, -EINPROGRESS); in cryptd_queue_worker()
|
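algapi.c above maintains queue->backlog as a cursor into the request list: it points at the first request past max_qlen, or back at &queue->list when nothing is backlogged; enqueueing the first over-limit request sets it, and each dequeue advances it. The sketch below reproduces that cursor logic with a toy list_head; it omits the kernel's CRYPTO_TFM_REQ_MAY_BACKLOG check and the -EBUSY return to the submitter.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

static void list_del_head(struct list_head *h)    /* assumes non-empty */
{
    struct list_head *first = h->next;
    h->next = first->next; first->next->prev = h;
}

struct queue {
    struct list_head list;
    struct list_head *backlog;    /* first over-limit request, or &list */
    unsigned int qlen, max_qlen;
};

struct request { struct list_head entry; int id; };

/* crypto_enqueue_request() analogue (backlog path always allowed here) */
static void enqueue(struct queue *q, struct request *r)
{
    list_add_tail(&r->entry, &q->list);
    if (q->qlen++ >= q->max_qlen && q->backlog == &q->list)
        q->backlog = &r->entry;    /* first backlogged request */
}

/* crypto_dequeue_request() analogue: pop head, advance the cursor */
static void dequeue(struct queue *q)
{
    if (q->backlog != &q->list)
        q->backlog = q->backlog->next;    /* one slot freed */
    list_del_head(&q->list);
    q->qlen--;
}

/* crypto_get_backlog() analogue, container_of() spelled out */
static struct request *get_backlog(struct queue *q)
{
    return q->backlog == &q->list ? NULL :
           (struct request *)((char *)q->backlog - offsetof(struct request, entry));
}

int main(void)
{
    struct queue q = { .max_qlen = 2 };
    struct request r[4] = { {.id = 0}, {.id = 1}, {.id = 2}, {.id = 3} };

    list_init(&q.list);
    q.backlog = &q.list;
    for (int i = 0; i < 4; i++)
        enqueue(&q, &r[i]);    /* r[2] and r[3] land on the backlog */
    printf("backlog head: %d\n", get_backlog(&q)->id);    /* 2 */
    dequeue(&q);
    printf("backlog head: %d\n", get_backlog(&q)->id);    /* 3 */
    return 0;
}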
/linux-4.4.14/drivers/atm/ |
D | zatm.h | 54 struct sk_buff_head backlog; /* list of buffers waiting for ring */ member
|
D | eni.h | 49 struct sk_buff_head backlog; /* queue of waiting TX buffers */ member
|
D | zatm.c |
   731  while ((skb = skb_dequeue(&zatm_vcc->backlog)))  in dequeue_tx()
   733  skb_queue_head(&zatm_vcc->backlog,skb);  in dequeue_tx()
   880  if (skb_peek(&zatm_vcc->backlog)) {  in close_tx()
   883  wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog));  in close_tx()
   964  skb_queue_head_init(&zatm_vcc->backlog);  in open_tx_first()
  1544  skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb);  in zatm_send()
|
D | lanai.c |
   243  struct sk_buff_head backlog;  member
   777  while ((skb = skb_dequeue(&lvcc->tx.backlog)) != NULL)  in lanai_shutdown_tx_vci()
  1153  return !skb_queue_empty(&lvcc->tx.backlog);  in vcc_is_backlogged()
  1312  skb = skb_dequeue(&lvcc->tx.backlog);  in vcc_tx_unqueue_aal5()
  1318  skb_queue_head(&lvcc->tx.backlog, skb);  in vcc_tx_unqueue_aal5()
  1344  skb_queue_tail(&lvcc->tx.backlog, skb);  in vcc_tx_aal5()
  1474  skb_queue_head_init(&lvcc->tx.backlog);  in new_lanai_vcc()
|
D | eni.c |
  1193  while ((skb = skb_dequeue(&tx->backlog))) {  in poll_tx()
  1197  skb_queue_head(&tx->backlog,skb);  in poll_tx()
  1332  skb_queue_head_init(&tx->backlog);  in reserve_or_set_tx()
  1407  txing = skb_peek(&eni_vcc->tx->backlog) || eni_vcc->txing;  in close_tx()
  2077  skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb);  in eni_send()
  2170  skb_queue_len(&tx->backlog));  in eni_proc_read()
|
/linux-4.4.14/include/linux/ |
D | tcp.h | 392 static inline void fastopen_queue_tune(struct sock *sk, int backlog) in fastopen_queue_tune() argument 397 queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn); in fastopen_queue_tune()
|
D | scif.h | 233 int scif_listen(scif_epd_t epd, int backlog);
|
D | net.h | 276 int kernel_listen(struct socket *sock, int backlog);
|
D | security.h | 1119 int security_socket_listen(struct socket *sock, int backlog); 1197 static inline int security_socket_listen(struct socket *sock, int backlog) in security_socket_listen() argument
|
D | lsm_hooks.h | 1535 int (*socket_listen)(struct socket *sock, int backlog);
|
D | netdevice.h | 2566 struct napi_struct backlog; member
|
/linux-4.4.14/Documentation/networking/ |
D | x25-iface.txt | 82 kernel if the backlog queue is congested. 96 The probability of packet loss due to backlog congestion can be 121 when a previously congested backlog queue becomes empty again.
|
D | scaling.txt |
  107  on the desired CPU’s backlog queue and waking up the CPU for processing.
  132  and the packet is queued to the tail of that CPU’s backlog queue. At
  134  packets have been queued to their backlog queue. The IPI wakes backlog
  234  to enqueue packets onto the backlog of another CPU and to wake up that
  266  CPU's backlog when a packet in this flow was last enqueued. Each backlog
  279  table), the packet is enqueued onto that CPU’s backlog. If they differ,
|
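The per-CPU backlog queues described in scaling.txt are bounded by the net.core.netdev_max_backlog sysctl (also referenced in net/Kconfig below). A trivial reader for the current cap, assuming Linux procfs is mounted:

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/net/core/netdev_max_backlog", "r");
    int max_backlog;

    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fscanf(f, "%d", &max_backlog) == 1)
        printf("netdev_max_backlog = %d packets\n", max_backlog);
    fclose(f);
    return 0;
}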
D | gen_stats.txt | 26 mystruct->qstats.backlog += skb->pkt_len;
|
D | nf_conntrack-sysctl.txt | 51 will take longer for a backlog to be processed.
|
D | cxgb.txt | 144 Setting maximum backlog (# of unprocessed packets before kernel drops):
|
D | ip-sysctl.txt | 176 Limit of socket listen() backlog, known in userspace as SOMAXCONN. 536 Send out syncookies when the syn backlog queue of a socket
|
/linux-4.4.14/drivers/infiniband/core/ |
D | iwcm.c | 435 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog) in iw_cm_listen() argument 443 if (!backlog) in iw_cm_listen() 444 backlog = default_backlog; in iw_cm_listen() 446 ret = alloc_work_entries(cm_id_priv, backlog); in iw_cm_listen() 455 ret = cm_id->device->iwcm->create_listen(cm_id, backlog); in iw_cm_listen()
|
D | ucma.c |
    86  int backlog;  member
   343  if (!ctx->backlog) {  in ucma_event_handler()
   348  ctx->backlog--;  in ucma_event_handler()
   410  uevent->ctx->backlog++;  in ucma_get_event()
  1051  ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?  in ucma_listen()
  1052  cmd.backlog : max_backlog;  in ucma_listen()
  1053  ret = rdma_listen(ctx->cm_id, ctx->backlog);  in ucma_listen()
|
D | cma.c |
   213  int backlog;  member
  1995  static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)  in cma_iw_listen() argument
  2012  ret = iw_cm_listen(id_priv->cm_id.iw, backlog);  in cma_iw_listen()
  2060  ret = rdma_listen(id, id_priv->backlog);  in cma_listen_on_dev()
  2923  int rdma_listen(struct rdma_cm_id *id, int backlog)  in rdma_listen() argument
  2945  id_priv->backlog = backlog;  in rdma_listen()
  2952  ret = cma_iw_listen(id_priv, backlog);  in rdma_listen()
  2964  id_priv->backlog = 0;  in rdma_listen()
|
/linux-4.4.14/drivers/staging/lustre/lnet/lnet/ |
D | lib-socket.c | 495 int backlog) in lnet_sock_listen() argument 508 rc = kernel_listen(*sockp, backlog); in lnet_sock_listen() 512 CERROR("Can't set listen backlog %d: %d\n", backlog, rc); in lnet_sock_listen()
|
/linux-4.4.14/drivers/infiniband/hw/nes/ |
D | nes_cm.h | 303 int backlog; member 377 int backlog; member
|
D | nes_cm.c | 2060 cm_node->listener->backlog) { in handle_syn_pkt() 2466 listener->backlog = cm_info->backlog; in mini_cm_listen() 2479 listener, listener->backlog, listener->cm_id); in mini_cm_listen() 3633 int nes_create_listen(struct iw_cm_id *cm_id, int backlog) in nes_create_listen() argument 3659 cm_info.backlog = backlog; in nes_create_listen()
|
/linux-4.4.14/net/dccp/ |
D | proto.c | 234 static inline int dccp_listen_start(struct sock *sk, int backlog) in dccp_listen_start() argument 242 return inet_csk_listen_start(sk, backlog); in dccp_listen_start() 915 int inet_dccp_listen(struct socket *sock, int backlog) in inet_dccp_listen() argument 939 err = dccp_listen_start(sk, backlog); in inet_dccp_listen() 943 sk->sk_max_ack_backlog = backlog; in inet_dccp_listen()
|
D | dccp.h | 318 int inet_dccp_listen(struct socket *sock, int backlog);
|
/linux-4.4.14/net/sunrpc/ |
D | xprt.c |
   988  xprt->stat.bklog_u += xprt->backlog.qlen;  in xprt_transmit()
  1010  rpc_sleep_on(&xprt->backlog, task, NULL);  in xprt_add_backlog()
  1015  if (rpc_wake_up_next(&xprt->backlog) == NULL)  in xprt_wake_up_backlog()
  1027  rpc_sleep_on(&xprt->backlog, task, NULL);  in xprt_throttle_congested()
  1329  rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");  in xprt_init()
  1409  rpc_destroy_wait_queue(&xprt->backlog);  in xprt_destroy()
|
/linux-4.4.14/drivers/misc/mic/scif/ |
D | scif_epd.h | 110 int backlog; member
|
D | scif_epd.c | 145 if (ep->backlog <= ep->conreqcnt) { in scif_cnctreq()
|
D | scif_api.c | 394 int scif_listen(scif_epd_t epd, int backlog) in scif_listen() argument 420 ep->backlog = backlog; in scif_listen()
|
/linux-4.4.14/net/atm/ |
D | svc.c | 281 static int svc_listen(struct socket *sock, int backlog) in svc_listen() argument 314 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; in svc_listen()
|
/linux-4.4.14/net/rxrpc/ |
D | af_rxrpc.c | 188 static int rxrpc_listen(struct socket *sock, int backlog) in rxrpc_listen() argument 194 _enter("%p,%d", rx, backlog); in rxrpc_listen() 209 sk->sk_max_ack_backlog = backlog; in rxrpc_listen()
|
/linux-4.4.14/drivers/net/wireless/p54/ |
D | lmac.h | 258 u8 backlog; member 452 u8 backlog; member
|
D | txrx.c | 921 txhdr->backlog = priv->tx_stats[queue].len - 1; in p54_tx_80211()
|
/linux-4.4.14/include/uapi/rdma/ |
D | rdma_user_cm.h | 208 __u32 backlog; member
|
/linux-4.4.14/net/llc/ |
D | af_llc.c | 505 static int llc_ui_listen(struct socket *sock, int backlog) in llc_ui_listen() argument 520 if (!(unsigned int)backlog) /* BSDism */ in llc_ui_listen() 521 backlog = 1; in llc_ui_listen() 522 sk->sk_max_ack_backlog = backlog; in llc_ui_listen()
|
/linux-4.4.14/net/nfc/ |
D | llcp_sock.c | 197 static int llcp_sock_listen(struct socket *sock, int backlog) in llcp_sock_listen() argument 202 pr_debug("sk %p backlog %d\n", sk, backlog); in llcp_sock_listen() 212 sk->sk_max_ack_backlog = backlog; in llcp_sock_listen()
|
/linux-4.4.14/net/bluetooth/rfcomm/ |
D | sock.c | 424 static int rfcomm_sock_listen(struct socket *sock, int backlog) in rfcomm_sock_listen() argument 429 BT_DBG("sk %p backlog %d", sk, backlog); in rfcomm_sock_listen() 464 sk->sk_max_ack_backlog = backlog; in rfcomm_sock_listen()
|
/linux-4.4.14/net/bluetooth/ |
D | sco.c | 588 static int sco_sock_listen(struct socket *sock, int backlog) in sco_sock_listen() argument 594 BT_DBG("sk %p backlog %d", sk, backlog); in sco_sock_listen() 615 sk->sk_max_ack_backlog = backlog; in sco_sock_listen()
|
D | l2cap_sock.c | 251 static int l2cap_sock_listen(struct socket *sock, int backlog) in l2cap_sock_listen() argument 257 BT_DBG("sk %p backlog %d", sk, backlog); in l2cap_sock_listen() 285 sk->sk_max_ack_backlog = backlog; in l2cap_sock_listen()
|
/linux-4.4.14/net/ipv4/ |
D | af_inet.c | 194 int inet_listen(struct socket *sock, int backlog) in inet_listen() argument 224 fastopen_queue_tune(sk, backlog); in inet_listen() 232 err = inet_csk_listen_start(sk, backlog); in inet_listen() 236 sk->sk_max_ack_backlog = backlog; in inet_listen()
|
D | inet_connection_sock.c | 736 int inet_csk_listen_start(struct sock *sk, int backlog) in inet_csk_listen_start() argument 743 sk->sk_max_ack_backlog = backlog; in inet_csk_listen_start()
|
/linux-4.4.14/security/tomoyo/ |
D | tomoyo.c | 445 static int tomoyo_socket_listen(struct socket *sock, int backlog) in tomoyo_socket_listen() argument
|
/linux-4.4.14/net/phonet/ |
D | socket.c | 404 static int pn_socket_listen(struct socket *sock, int backlog) in pn_socket_listen() argument 422 sk->sk_max_ack_backlog = backlog; in pn_socket_listen()
|
/linux-4.4.14/net/ |
D | socket.c |
  1392  SYSCALL_DEFINE2(listen, int, fd, int, backlog)  in SYSCALL_DEFINE2() argument
  1401  if ((unsigned int)backlog > somaxconn)  in SYSCALL_DEFINE2()
  1402  backlog = somaxconn;  in SYSCALL_DEFINE2()
  1404  err = security_socket_listen(sock, backlog);  in SYSCALL_DEFINE2()
  1406  err = sock->ops->listen(sock, backlog);  in SYSCALL_DEFINE2()
  3175  int kernel_listen(struct socket *sock, int backlog)  in kernel_listen() argument
  3177  return sock->ops->listen(sock, backlog);  in kernel_listen()
|
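From userspace, the backlog threaded through sock->ops->listen() in all the protocol entries above is just the second argument of listen(2); SYSCALL_DEFINE2(listen) silently clamps it to net.core.somaxconn before any protocol sees it. A runnable example requesting a backlog of 128 on a loopback TCP socket:

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    struct sockaddr_in addr;

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = 0;    /* let the kernel pick a free port */

    if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
        listen(fd, 128) < 0) {    /* request an accept-queue backlog of 128 */
        perror("listen setup");
        return 1;
    }
    puts("listening; accept queue bounded by min(128, somaxconn)");
    close(fd);
    return 0;
}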
D | Kconfig | 294 backlog reaches netdev_max_backlog. If a few out of many active flows
|
/linux-4.4.14/include/linux/sunrpc/ |
D | xprt.h | 188 struct rpc_wait_queue backlog; /* waiting for slot */ member
|
/linux-4.4.14/drivers/md/ |
D | bitmap.c | 2299 unsigned long backlog; in backlog_store() local 2300 int rv = kstrtoul(buf, 10, &backlog); in backlog_store() 2303 if (backlog > COUNTER_MAX) in backlog_store() 2305 mddev->bitmap_info.max_write_behind = backlog; in backlog_store() 2310 __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
|
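backlog_store() in bitmap.c above is the usual sysfs store shape: parse the decimal string with kstrtoul(), bound-check, then commit. A userspace equivalent built on strtoul() (the COUNTER_MAX value here is a placeholder, not md's actual constant):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define COUNTER_MAX ((1UL << 14) - 1)    /* assumption for the example */

static int backlog_store(const char *buf, unsigned long *out)
{
    char *end;
    unsigned long backlog;

    errno = 0;
    backlog = strtoul(buf, &end, 10);    /* kstrtoul(buf, 10, ...) analogue */
    if (errno || end == buf)
        return -EINVAL;                  /* not a valid decimal number */
    if (backlog > COUNTER_MAX)
        return -EINVAL;                  /* out of range: reject */
    *out = backlog;
    return 0;
}

int main(void)
{
    unsigned long v;

    printf("%d\n", backlog_store("512", &v));         /* 0: accepted */
    printf("%d\n", backlog_store("99999999", &v));    /* -EINVAL: too large */
    return 0;
}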
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
D | iw_cxgb4.h | 803 int backlog; member 953 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
|
D | device.c | 627 ep->com.flags, ep->stid, ep->backlog, in dump_listen_ep() 640 ep->com.flags, ep->stid, ep->backlog, in dump_listen_ep()
|
D | cm.c | 3335 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) in c4iw_create_listen() argument 3356 ep->backlog = backlog; in c4iw_create_listen()
|
/linux-4.4.14/include/net/sctp/ |
D | sctp.h | 102 int sctp_inet_listen(struct socket *sock, int backlog);
|
/linux-4.4.14/net/netrom/ |
D | af_netrom.c | 401 static int nr_listen(struct socket *sock, int backlog) in nr_listen() argument 408 sk->sk_max_ack_backlog = backlog; in nr_listen()
|
/linux-4.4.14/drivers/target/iscsi/ |
D | iscsi_target_login.c | 883 int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len; in iscsit_setup_np() local 963 ret = kernel_listen(sock, backlog); in iscsit_setup_np()
|
/linux-4.4.14/drivers/staging/lustre/include/linux/lnet/ |
D | lib-lnet.h | 648 int lnet_sock_listen(struct socket **sockp, __u32 ip, int port, int backlog);
|
/linux-4.4.14/net/rose/ |
D | af_rose.c | 487 static int rose_listen(struct socket *sock, int backlog) in rose_listen() argument 498 sk->sk_max_ack_backlog = backlog; in rose_listen()
|
/linux-4.4.14/net/vmw_vsock/ |
D | af_vsock.c | 1322 static int vsock_listen(struct socket *sock, int backlog) in vsock_listen() argument 1349 sk->sk_max_ack_backlog = backlog; in vsock_listen()
|
/linux-4.4.14/net/sctp/ |
D | socket.c |
  6307  static int sctp_listen_start(struct sock *sk, int backlog)  in sctp_listen_start() argument
  6348  sk->sk_max_ack_backlog = backlog;  in sctp_listen_start()
  6367  int sctp_inet_listen(struct socket *sock, int backlog)  in sctp_inet_listen() argument
  6373  if (unlikely(backlog < 0))  in sctp_inet_listen()
  6386  if (!backlog) {  in sctp_inet_listen()
  6400  sk->sk_max_ack_backlog = backlog;  in sctp_inet_listen()
  6402  err = sctp_listen_start(sk, backlog);  in sctp_inet_listen()
|
/linux-4.4.14/Documentation/ |
D | iostats.txt | 91 I/O completion time and the backlog that may be accumulating.
|
D | md.txt | 318 bitmap/backlog 322 'backlog' sets a limit on the number of concurrent background
|
/linux-4.4.14/net/x25/ |
D | af_x25.c | 495 static int x25_listen(struct socket *sock, int backlog) in x25_listen() argument 503 sk->sk_max_ack_backlog = backlog; in x25_listen()
|
/linux-4.4.14/net/unix/ |
D | af_unix.c | 602 static int unix_listen(struct socket *sock, int backlog) in unix_listen() argument 618 if (backlog > sk->sk_max_ack_backlog) in unix_listen() 620 sk->sk_max_ack_backlog = backlog; in unix_listen()
|
/linux-4.4.14/net/ax25/ |
D | af_ax25.c | 773 static int ax25_listen(struct socket *sock, int backlog) in ax25_listen() argument 780 sk->sk_max_ack_backlog = backlog; in ax25_listen()
|
/linux-4.4.14/net/irda/ |
D | af_irda.c | 735 static int irda_listen(struct socket *sock, int backlog) in irda_listen() argument 747 sk->sk_max_ack_backlog = backlog; in irda_listen()
|
/linux-4.4.14/security/ |
D | security.c | 1218 int security_socket_listen(struct socket *sock, int backlog) in security_socket_listen() argument 1220 return call_int_hook(socket_listen, 0, sock, backlog); in security_socket_listen()
|
/linux-4.4.14/net/decnet/ |
D | af_decnet.c | 1274 static int dn_listen(struct socket *sock, int backlog) in dn_listen() argument 1287 sk->sk_max_ack_backlog = backlog; in dn_listen()
|
/linux-4.4.14/net/iucv/ |
D | af_iucv.c | 904 static int iucv_sock_listen(struct socket *sock, int backlog) in iucv_sock_listen() argument 918 sk->sk_max_ack_backlog = backlog; in iucv_sock_listen()
|
/linux-4.4.14/kernel/ |
D | audit.c | 851 s.backlog = skb_queue_len(&audit_skb_queue); in audit_receive_msg()
|
/linux-4.4.14/security/selinux/ |
D | hooks.c | 4253 static int selinux_socket_listen(struct socket *sock, int backlog) in selinux_socket_listen() argument
|