Searched refs:backlog (Results 1 – 138 of 138) sorted by relevance

/linux-4.4.14/drivers/crypto/ccp/
Dccp-crypto-main.c47 struct list_head *backlog; member
89 struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog) in ccp_crypto_cmd_complete() argument
94 *backlog = NULL; in ccp_crypto_cmd_complete()
113 if (req_queue.backlog != &req_queue.cmds) { in ccp_crypto_cmd_complete()
115 if (req_queue.backlog == &crypto_cmd->entry) in ccp_crypto_cmd_complete()
116 req_queue.backlog = crypto_cmd->entry.next; in ccp_crypto_cmd_complete()
118 *backlog = container_of(req_queue.backlog, in ccp_crypto_cmd_complete()
120 req_queue.backlog = req_queue.backlog->next; in ccp_crypto_cmd_complete()
123 if (req_queue.backlog == &crypto_cmd->entry) in ccp_crypto_cmd_complete()
124 req_queue.backlog = crypto_cmd->entry.next; in ccp_crypto_cmd_complete()
[all …]
Dccp-dev.c114 list_add_tail(&cmd->entry, &ccp->backlog); in ccp_enqueue_cmd()
174 struct ccp_cmd *backlog = NULL; in ccp_dequeue_cmd() local
199 if (!list_empty(&ccp->backlog)) { in ccp_dequeue_cmd()
200 backlog = list_first_entry(&ccp->backlog, struct ccp_cmd, in ccp_dequeue_cmd()
202 list_del(&backlog->entry); in ccp_dequeue_cmd()
207 if (backlog) { in ccp_dequeue_cmd()
208 INIT_WORK(&backlog->work, ccp_do_cmd_backlog); in ccp_dequeue_cmd()
209 schedule_work(&backlog->work); in ccp_dequeue_cmd()
304 INIT_LIST_HEAD(&ccp->backlog); in ccp_alloc_struct()
513 while (!list_empty(&ccp->backlog)) { in ccp_destroy()
[all …]
Dccp-dev.h217 struct list_head backlog; member
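
The pattern visible in the ccp excerpts: a command that cannot be accepted immediately is appended to ccp->backlog, and ccp_dequeue_cmd() later pulls the oldest backlogged command and re-dispatches it from a workqueue. A minimal sketch of that list-plus-workqueue idiom; my_dev and my_cmd are illustrative stand-ins, not the CCP driver's real types:

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct my_cmd {
            struct list_head entry;
            struct work_struct work;
    };

    struct my_dev {
            spinlock_t lock;
            struct list_head backlog;       /* commands waiting for queue space */
    };

    static void my_cmd_backlog_fn(struct work_struct *work)
    {
            struct my_cmd *cmd = container_of(work, struct my_cmd, work);

            /* resubmit cmd to the engine here */
            (void)cmd;
    }

    static void my_dequeue_backlog(struct my_dev *dev)
    {
            struct my_cmd *backlog = NULL;
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);
            if (!list_empty(&dev->backlog)) {
                    backlog = list_first_entry(&dev->backlog, struct my_cmd, entry);
                    list_del(&backlog->entry);
            }
            spin_unlock_irqrestore(&dev->lock, flags);

            /* as in ccp_dequeue_cmd(): hand the backlogged command back to process context */
            if (backlog) {
                    INIT_WORK(&backlog->work, my_cmd_backlog_fn);
                    schedule_work(&backlog->work);
            }
    }
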
/linux-4.4.14/tools/perf/ui/tui/
Dhelpline.c32 static int backlog; in tui_helpline__show() local
35 ret = vscnprintf(ui_helpline__last_msg + backlog, in tui_helpline__show()
36 sizeof(ui_helpline__last_msg) - backlog, format, ap); in tui_helpline__show()
37 backlog += ret; in tui_helpline__show()
41 if (ui_helpline__last_msg[backlog - 1] == '\n') { in tui_helpline__show()
44 backlog = 0; in tui_helpline__show()
/linux-4.4.14/tools/perf/ui/gtk/
Dhelpline.c31 static int backlog; in gtk_helpline_show() local
33 ret = vscnprintf(ui_helpline__current + backlog, in gtk_helpline_show()
34 sizeof(ui_helpline__current) - backlog, fmt, ap); in gtk_helpline_show()
35 backlog += ret; in gtk_helpline_show()
39 if (ptr && (ptr - ui_helpline__current) <= backlog) { in gtk_helpline_show()
42 backlog = 0; in gtk_helpline_show()
/linux-4.4.14/net/sched/
Dsch_gred.c40 u32 backlog; /* bytes on the virtualQ */ member
118 return sch->qstats.backlog; in gred_backlog()
120 return q->backlog; in gred_backlog()
168 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= in gred_enqueue()
234 q->backlog += qdisc_pkt_len(skb); in gred_enqueue()
262 q->backlog -= qdisc_pkt_len(skb); in gred_dequeue()
265 if (!sch->qstats.backlog) in gred_dequeue()
268 if (!q->backlog) in gred_dequeue()
294 q->backlog -= len; in gred_drop()
298 if (!sch->qstats.backlog) in gred_drop()
[all …]
Dsch_sfq.c112 unsigned int backlog; member
307 slot->backlog -= len; in sfq_drop()
374 slot->backlog = 0; /* should already be 0 anyway... */ in sfq_enqueue()
381 slot->backlog); in sfq_enqueue()
432 sch->qstats.backlog -= delta; in sfq_enqueue()
433 slot->backlog -= delta; in sfq_enqueue()
442 slot->backlog += qdisc_pkt_len(skb); in sfq_enqueue()
501 slot->backlog -= qdisc_pkt_len(skb); in sfq_dequeue()
553 slot->backlog = 0; in sfq_rehash()
584 slot->backlog); in sfq_rehash()
[all …]
Dsch_hhf.c375 prev_backlog = sch->qstats.backlog; in hhf_qdisc_drop()
377 return prev_backlog - sch->qstats.backlog; in hhf_qdisc_drop()
413 prev_backlog = sch->qstats.backlog; in hhf_enqueue()
422 qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog); in hhf_enqueue()
582 prev_backlog = sch->qstats.backlog; in hhf_change()
589 prev_backlog - sch->qstats.backlog); in hhf_change()
Dsch_red.c67 child->qstats.backlog); in red_enqueue()
214 q->qdisc->qstats.backlog); in red_change()
272 sch->qstats.backlog = q->qdisc->qstats.backlog; in red_dump()
Dsch_fq_codel.c170 prev_backlog = sch->qstats.backlog; in fq_codel_qdisc_drop()
172 return prev_backlog - sch->qstats.backlog; in fq_codel_qdisc_drop()
206 prev_backlog = sch->qstats.backlog; in fq_codel_enqueue()
215 qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog); in fq_codel_enqueue()
264 prev_backlog = sch->qstats.backlog; in fq_codel_dequeue()
588 qs.backlog = q->backlogs[idx]; in fq_codel_dump_class_stats()
Dsch_mqprio.c239 sch->qstats.backlog += qdisc->qstats.backlog; in mqprio_dump()
350 qstats.backlog += qdisc->qstats.backlog; in mqprio_dump_class_stats()
Dsch_mq.c115 sch->qstats.backlog += qdisc->qstats.backlog; in mq_dump()
Dsch_pie.c119 if (sch->qstats.backlog < 2 * mtu) in drop_early()
249 int qlen = sch->qstats.backlog; /* current queue size in bytes */ in pie_process_dequeue()
314 u32 qlen = sch->qstats.backlog; /* queue size in bytes */ in calculate_probability()
Dsch_tbf.c405 q->qdisc->qstats.backlog); in tbf_change()
459 sch->qstats.backlog = q->qdisc->qstats.backlog; in tbf_dump()
Dsch_drr.c56 unsigned int backlog = cl->qdisc->qstats.backlog; in drr_purge_queue() local
59 qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); in drr_purge_queue()
Dsch_htb.c960 sch->qstats.backlog -= len; in htb_drop()
995 sch->qstats.backlog = 0; in htb_reset()
1291 unsigned int backlog = cl->un.leaf.q->qstats.backlog; in htb_delete() local
1294 qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog); in htb_delete()
1428 unsigned int backlog = parent->un.leaf.q->qstats.backlog; in htb_change_class() local
1432 qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog); in htb_change_class()
Dsch_hfsc.c898 unsigned int backlog = cl->qdisc->qstats.backlog; in hfsc_purge_queue() local
901 qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); in hfsc_purge_queue()
1370 cl->qstats.backlog = cl->qdisc->qstats.backlog; in hfsc_dump_class_stats()
1565 sch->qstats.backlog = 0; in hfsc_dump_qdisc()
1568 sch->qstats.backlog += cl->qdisc->qstats.backlog; in hfsc_dump_qdisc()
Dsch_plug.c93 if (likely(sch->qstats.backlog + skb->len <= q->limit)) { in plug_enqueue()
Dsch_multiq.c222 child->qstats.backlog); in multiq_tune()
244 old->qstats.backlog); in multiq_tune()
Dsch_prio.c194 qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog); in prio_tune()
215 old->qstats.backlog); in prio_tune()
Dsch_sfb.c514 q->qdisc->qstats.backlog); in sfb_change()
566 sch->qstats.backlog = q->qdisc->qstats.backlog; in sfb_dump()
Dsch_fifo.c24 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) in bfifo_enqueue()
Dsch_qfq.c223 unsigned int backlog = cl->qdisc->qstats.backlog; in qfq_purge_queue() local
226 qdisc_tree_reduce_backlog(cl->qdisc, len, backlog); in qfq_purge_queue()
Dsch_cbq.c1912 unsigned int qlen, backlog; in cbq_delete() local
1920 backlog = cl->q->qstats.backlog; in cbq_delete()
1922 qdisc_tree_reduce_backlog(cl->q, qlen, backlog); in cbq_delete()
Dsch_dsmark.c400 sch->qstats.backlog = 0; in dsmark_reset()
Dsch_generic.c532 qdisc->qstats.backlog = 0; in pfifo_fast_reset()
Dsch_api.c778 sch->qstats.backlog -= len; in qdisc_tree_reduce_backlog()
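
A pattern that repeats throughout net/sched above: each qdisc charges qstats.backlog with qdisc_pkt_len(skb) on enqueue and credits it on dequeue, and classful qdiscs report the residual packet and byte counts upward with qdisc_tree_reduce_backlog() when a child queue is purged. A minimal sketch against the 4.4 helpers; the "toy" qdisc is illustrative, not a real scheduler:

    #include <linux/skbuff.h>
    #include <net/sch_generic.h>
    #include <net/pkt_sched.h>

    /* Illustrative enqueue/dequeue pair showing the qstats.backlog bookkeeping. */
    static int toy_enqueue(struct sk_buff *skb, struct Qdisc *sch)
    {
            /* byte-based limit, as bfifo_enqueue() does above */
            if (unlikely(sch->qstats.backlog + qdisc_pkt_len(skb) > sch->limit))
                    return qdisc_drop(skb, sch);    /* frees skb, bumps drop counters */

            __skb_queue_tail(&sch->q, skb);
            qdisc_qstats_backlog_inc(sch, skb);     /* backlog += qdisc_pkt_len(skb) */
            return NET_XMIT_SUCCESS;
    }

    static struct sk_buff *toy_dequeue(struct Qdisc *sch)
    {
            struct sk_buff *skb = __skb_dequeue(&sch->q);

            if (skb) {
                    qdisc_qstats_backlog_dec(sch, skb);  /* backlog -= qdisc_pkt_len(skb) */
                    qdisc_bstats_update(sch, skb);
            }
            return skb;
    }

When a classful parent deletes or replaces a child, it passes the child's leftover counters upward in one call, e.g. qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog), which is exactly what the drr, htb, hfsc and qfq excerpts do.
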
/linux-4.4.14/net/core/
Dgen_stats.c231 qstats->backlog += qcpu->backlog; in __gnet_stats_copy_queue_cpu()
247 qstats->backlog = q->backlog; in __gnet_stats_copy_queue()
282 d->tc_stats.backlog = qstats.backlog; in gnet_stats_copy_queue()
Ddev.c3416 ____napi_schedule(sd, &sd->backlog); in rps_trigger_softirq()
3515 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { in enqueue_to_backlog()
3517 ____napi_schedule(sd, &sd->backlog); in enqueue_to_backlog()
4545 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog()
7783 sd->backlog.poll = process_backlog; in net_dev_init()
7784 sd->backlog.weight = weight_p; in net_dev_init()
Dsock.c2214 int sock_no_listen(struct socket *sock, int backlog) in sock_no_listen() argument
/linux-4.4.14/drivers/crypto/qce/
Dcore.c82 struct crypto_async_request *async_req, *backlog; in qce_handle_queue() local
97 backlog = crypto_get_backlog(&qce->queue); in qce_handle_queue()
107 if (backlog) { in qce_handle_queue()
109 backlog->complete(backlog, -EINPROGRESS); in qce_handle_queue()
/linux-4.4.14/include/net/
Dred.h280 unsigned int backlog) in red_calc_qavg_no_idle_time() argument
291 return v->qavg + (backlog - (v->qavg >> p->Wlog)); in red_calc_qavg_no_idle_time()
296 unsigned int backlog) in red_calc_qavg() argument
299 return red_calc_qavg_no_idle_time(p, v, backlog); in red_calc_qavg()
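
The red.h helpers above keep v->qavg scaled by 2^Wlog, so the excerpted expression is the fixed-point form of an exponentially weighted moving average with weight W = 2^-Wlog. A small standalone restatement of that step (the function name is mine; the arithmetic is the one shown in red_calc_qavg_no_idle_time()):

    /* avg_new = avg + W * (backlog - avg), with W = 2^-Wlog.
     * Storing avg scaled by 2^Wlog turns this into pure integer math:
     * adding the unscaled delta (backlog - avg) to the scaled value is the
     * same as adding W * (backlog - avg) to the unscaled average. */
    static inline unsigned long ewma_step(unsigned long qavg_scaled,
                                          unsigned int backlog, unsigned char Wlog)
    {
            return qavg_scaled + (backlog - (qavg_scaled >> Wlog));
    }

With Wlog = 9, for example, each new sample moves the average by 1/512 of the gap between the current average and the instantaneous backlog.
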
Dinet_common.h30 int inet_listen(struct socket *sock, int backlog);
Dsch_generic.h545 sch->qstats.backlog -= qdisc_pkt_len(skb); in qdisc_qstats_backlog_dec()
551 sch->qstats.backlog += qdisc_pkt_len(skb); in qdisc_qstats_backlog_inc()
699 sch->qstats.backlog = 0; in qdisc_reset_queue()
711 qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); in qdisc_replace()
Dcodel.h244 sch->qstats.backlog -= qdisc_pkt_len(skb); in codel_should_drop()
250 sch->qstats.backlog <= params->mtu) { in codel_should_drop()
Dinet_connection_sock.h317 int inet_csk_listen_start(struct sock *sk, int backlog);
/linux-4.4.14/drivers/net/ipvlan/
Dipvlan_core.c205 spin_lock_bh(&port->backlog.lock); in ipvlan_process_multicast()
206 skb_queue_splice_tail_init(&port->backlog, &list); in ipvlan_process_multicast()
207 spin_unlock_bh(&port->backlog.lock); in ipvlan_process_multicast()
466 spin_lock(&port->backlog.lock); in ipvlan_multicast_enqueue()
467 if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) { in ipvlan_multicast_enqueue()
468 __skb_queue_tail(&port->backlog, skb); in ipvlan_multicast_enqueue()
469 spin_unlock(&port->backlog.lock); in ipvlan_multicast_enqueue()
472 spin_unlock(&port->backlog.lock); in ipvlan_multicast_enqueue()
Dipvlan.h97 struct sk_buff_head backlog; member
Dipvlan_main.c57 skb_queue_head_init(&port->backlog); in ipvlan_port_create()
79 __skb_queue_purge(&port->backlog); in ipvlan_port_destroy()
/linux-4.4.14/include/rdma/
Diw_cm.h123 int backlog);
181 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
Drdma_cm.h285 int rdma_listen(struct rdma_cm_id *id, int backlog);
/linux-4.4.14/drivers/crypto/
Dmxs-dcp.c347 struct crypto_async_request *backlog; in dcp_chan_thread_aes() local
356 backlog = crypto_get_backlog(&sdcp->queue[chan]); in dcp_chan_thread_aes()
360 if (backlog) in dcp_chan_thread_aes()
361 backlog->complete(backlog, -EINPROGRESS); in dcp_chan_thread_aes()
640 struct crypto_async_request *backlog; in dcp_chan_thread_sha() local
652 backlog = crypto_get_backlog(&sdcp->queue[chan]); in dcp_chan_thread_sha()
656 if (backlog) in dcp_chan_thread_sha()
657 backlog->complete(backlog, -EINPROGRESS); in dcp_chan_thread_sha()
Ds5p-sss.c466 struct crypto_async_request *async_req, *backlog; in s5p_tasklet_cb() local
471 backlog = crypto_get_backlog(&dev->queue); in s5p_tasklet_cb()
481 if (backlog) in s5p_tasklet_cb()
482 backlog->complete(backlog, -EINPROGRESS); in s5p_tasklet_cb()
Dbfin_crc.c282 struct crypto_async_request *async_req, *backlog; in bfin_crypto_crc_handle_queue() local
298 backlog = crypto_get_backlog(&crc->queue); in bfin_crypto_crc_handle_queue()
307 if (backlog) in bfin_crypto_crc_handle_queue()
308 backlog->complete(backlog, -EINPROGRESS); in bfin_crypto_crc_handle_queue()
Dmv_cesa.c603 struct crypto_async_request *backlog = NULL; in queue_manag() local
612 backlog = crypto_get_backlog(&cpg->queue); in queue_manag()
621 if (backlog) { in queue_manag()
622 backlog->complete(backlog, -EINPROGRESS); in queue_manag()
623 backlog = NULL; in queue_manag()
Dimg-hash.c497 struct crypto_async_request *async_req, *backlog; in img_hash_handle_queue() local
512 backlog = crypto_get_backlog(&hdev->queue); in img_hash_handle_queue()
522 if (backlog) in img_hash_handle_queue()
523 backlog->complete(backlog, -EINPROGRESS); in img_hash_handle_queue()
Domap-des.c592 struct crypto_async_request *async_req, *backlog; in omap_des_handle_queue() local
605 backlog = crypto_get_backlog(&dd->queue); in omap_des_handle_queue()
614 if (backlog) in omap_des_handle_queue()
615 backlog->complete(backlog, -EINPROGRESS); in omap_des_handle_queue()
Domap-aes.c611 struct crypto_async_request *async_req, *backlog; in omap_aes_handle_queue() local
624 backlog = crypto_get_backlog(&dd->queue); in omap_aes_handle_queue()
633 if (backlog) in omap_aes_handle_queue()
634 backlog->complete(backlog, -EINPROGRESS); in omap_aes_handle_queue()
Datmel-aes.c565 struct crypto_async_request *async_req, *backlog; in atmel_aes_handle_queue() local
578 backlog = crypto_get_backlog(&dd->queue); in atmel_aes_handle_queue()
587 if (backlog) in atmel_aes_handle_queue()
588 backlog->complete(backlog, -EINPROGRESS); in atmel_aes_handle_queue()
Datmel-tdes.c591 struct crypto_async_request *async_req, *backlog; in atmel_tdes_handle_queue() local
604 backlog = crypto_get_backlog(&dd->queue); in atmel_tdes_handle_queue()
613 if (backlog) in atmel_tdes_handle_queue()
614 backlog->complete(backlog, -EINPROGRESS); in atmel_tdes_handle_queue()
Datmel-sha.c832 struct crypto_async_request *async_req, *backlog; in atmel_sha_handle_queue() local
846 backlog = crypto_get_backlog(&dd->queue); in atmel_sha_handle_queue()
856 if (backlog) in atmel_sha_handle_queue()
857 backlog->complete(backlog, -EINPROGRESS); in atmel_sha_handle_queue()
Dsahara.c1039 struct crypto_async_request *backlog; in sahara_queue_manage() local
1046 backlog = crypto_get_backlog(&dev->queue); in sahara_queue_manage()
1050 if (backlog) in sahara_queue_manage()
1051 backlog->complete(backlog, -EINPROGRESS); in sahara_queue_manage()
Domap-sham.c1016 struct crypto_async_request *async_req, *backlog; in omap_sham_handle_queue() local
1028 backlog = crypto_get_backlog(&dd->queue); in omap_sham_handle_queue()
1037 if (backlog) in omap_sham_handle_queue()
1038 backlog->complete(backlog, -EINPROGRESS); in omap_sham_handle_queue()
Dhifn_795x.c2137 struct crypto_async_request *async_req, *backlog; in hifn_process_queue() local
2144 backlog = crypto_get_backlog(&dev->queue); in hifn_process_queue()
2151 if (backlog) in hifn_process_queue()
2152 backlog->complete(backlog, -EINPROGRESS); in hifn_process_queue()
/linux-4.4.14/net/tipc/
Dlink.c655 lim = l->window + l->backlog[imp].limit; in link_prepare_wakeup()
657 if ((pnd[imp] + l->backlog[imp].len) >= lim) in link_prepare_wakeup()
680 l->backlog[TIPC_LOW_IMPORTANCE].len = 0; in tipc_link_reset()
681 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0; in tipc_link_reset()
682 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0; in tipc_link_reset()
683 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0; in tipc_link_reset()
684 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0; in tipc_link_reset()
727 if (unlikely(l->backlog[i].len >= l->backlog[i].limit)) in tipc_link_xmit()
761 l->backlog[msg_importance(buf_msg(bskb))].len++; in tipc_link_xmit()
766 l->backlog[imp].len += skb_queue_len(list); in tipc_link_xmit()
[all …]
Dlink.h192 } backlog[5]; member
/linux-4.4.14/drivers/crypto/marvell/
Dcesa.c42 struct crypto_async_request *req, *backlog; in mv_cesa_dequeue_req_unlocked() local
46 backlog = crypto_get_backlog(&cesa_dev->queue); in mv_cesa_dequeue_req_unlocked()
54 if (backlog) in mv_cesa_dequeue_req_unlocked()
55 backlog->complete(backlog, -EINPROGRESS); in mv_cesa_dequeue_req_unlocked()
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
Diwch_cm.h172 int backlog; member
218 int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
Diwch_cm.c1986 int iwch_create_listen(struct iw_cm_id *cm_id, int backlog) in iwch_create_listen() argument
2010 ep->backlog = backlog; in iwch_create_listen()
/linux-4.4.14/include/uapi/linux/
Dgen_stats.h61 __u32 backlog; member
Dpkt_sched.h41 __u32 backlog; member
282 __u32 backlog; member
Daudit.h420 __u32 backlog; /* messages waiting in queue */ member
/linux-4.4.14/drivers/staging/rdma/amso1100/
Dc2_cm.c135 int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog) in c2_llp_service_create() argument
166 wr.backlog = cpu_to_be32(backlog); in c2_llp_service_create()
Dc2_provider.c631 static int c2_service_create(struct iw_cm_id *cm_id, int backlog) in c2_service_create() argument
636 err = c2_llp_service_create(cm_id, backlog); in c2_service_create()
Dc2.h527 extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
Dc2_wr.h1282 __be32 backlog; /* tradional tcp listen bl */ member
/linux-4.4.14/include/crypto/
Dalgapi.h73 struct list_head *backlog; member
326 return queue->backlog == &queue->list ? NULL : in crypto_get_backlog()
327 container_of(queue->backlog, struct crypto_async_request, list); in crypto_get_backlog()
/linux-4.4.14/crypto/
Dalgapi.c878 queue->backlog = &queue->list; in crypto_init_queue()
893 if (queue->backlog == &queue->list) in crypto_enqueue_request()
894 queue->backlog = &request->list; in crypto_enqueue_request()
914 if (queue->backlog != &queue->list) in crypto_dequeue_request()
915 queue->backlog = queue->backlog->next; in crypto_dequeue_request()
Dmcryptd.c156 struct crypto_async_request *req, *backlog; in mcryptd_queue_worker() local
173 backlog = crypto_get_backlog(&cpu_queue->queue); in mcryptd_queue_worker()
183 if (backlog) in mcryptd_queue_worker()
184 backlog->complete(backlog, -EINPROGRESS); in mcryptd_queue_worker()
Dcryptd.c137 struct crypto_async_request *req, *backlog; in cryptd_queue_worker() local
148 backlog = crypto_get_backlog(&cpu_queue->queue); in cryptd_queue_worker()
156 if (backlog) in cryptd_queue_worker()
157 backlog->complete(backlog, -EINPROGRESS); in cryptd_queue_worker()
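
Nearly every hardware driver above repeats the same dequeue idiom around crypto_queue: take the next request, and if crypto_get_backlog() returns a request that was only accepted because its submitter set CRYPTO_TFM_REQ_MAY_BACKLOG, complete it with -EINPROGRESS so the submitter knows it is now truly in flight. A condensed sketch of that loop; my_dev and my_submit() are illustrative, not any specific driver's API:

    #include <crypto/algapi.h>
    #include <linux/spinlock.h>

    struct my_dev {
            spinlock_t lock;
            struct crypto_queue queue;
    };

    static void my_submit(struct my_dev *dev, struct crypto_async_request *req)
    {
            /* hand the request to the hardware here */
    }

    static void my_handle_queue(struct my_dev *dev)
    {
            struct crypto_async_request *async_req, *backlog;
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);
            backlog = crypto_get_backlog(&dev->queue);       /* oldest backlogged request, if any */
            async_req = crypto_dequeue_request(&dev->queue); /* also advances queue->backlog */
            spin_unlock_irqrestore(&dev->lock, flags);

            if (backlog)
                    backlog->complete(backlog, -EINPROGRESS); /* submitter may stop waiting */

            if (async_req)
                    my_submit(dev, async_req);
    }
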
/linux-4.4.14/drivers/atm/
Dzatm.h54 struct sk_buff_head backlog; /* list of buffers waiting for ring */ member
Deni.h49 struct sk_buff_head backlog; /* queue of waiting TX buffers */ member
Dzatm.c731 while ((skb = skb_dequeue(&zatm_vcc->backlog))) in dequeue_tx()
733 skb_queue_head(&zatm_vcc->backlog,skb); in dequeue_tx()
880 if (skb_peek(&zatm_vcc->backlog)) { in close_tx()
883 wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog)); in close_tx()
964 skb_queue_head_init(&zatm_vcc->backlog); in open_tx_first()
1544 skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb); in zatm_send()
Dlanai.c243 struct sk_buff_head backlog; member
777 while ((skb = skb_dequeue(&lvcc->tx.backlog)) != NULL) in lanai_shutdown_tx_vci()
1153 return !skb_queue_empty(&lvcc->tx.backlog); in vcc_is_backlogged()
1312 skb = skb_dequeue(&lvcc->tx.backlog); in vcc_tx_unqueue_aal5()
1318 skb_queue_head(&lvcc->tx.backlog, skb); in vcc_tx_unqueue_aal5()
1344 skb_queue_tail(&lvcc->tx.backlog, skb); in vcc_tx_aal5()
1474 skb_queue_head_init(&lvcc->tx.backlog); in new_lanai_vcc()
Deni.c1193 while ((skb = skb_dequeue(&tx->backlog))) { in poll_tx()
1197 skb_queue_head(&tx->backlog,skb); in poll_tx()
1332 skb_queue_head_init(&tx->backlog); in reserve_or_set_tx()
1407 txing = skb_peek(&eni_vcc->tx->backlog) || eni_vcc->txing; in close_tx()
2077 skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb); in eni_send()
2170 skb_queue_len(&tx->backlog)); in eni_proc_read()
/linux-4.4.14/include/linux/
Dtcp.h392 static inline void fastopen_queue_tune(struct sock *sk, int backlog) in fastopen_queue_tune() argument
397 queue->fastopenq.max_qlen = min_t(unsigned int, backlog, somaxconn); in fastopen_queue_tune()
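
fastopen_queue_tune() is what caps the TCP Fast Open request queue at somaxconn, as the min_t() above shows; from user space that queue length is normally supplied through the TCP_FASTOPEN socket option before listen(). A hedged userspace sketch (error handling omitted; the port and queue length of 16 are illustrative values):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Userspace side of the Fast Open queue tuned by fastopen_queue_tune() above. */
    static int make_tfo_listener(void)
    {
            struct sockaddr_in sin;
            int fd = socket(AF_INET, SOCK_STREAM, 0);
            int qlen = 16;          /* max pending TFO requests; kernel caps it at somaxconn */

            memset(&sin, 0, sizeof(sin));
            sin.sin_family = AF_INET;
            sin.sin_addr.s_addr = htonl(INADDR_ANY);
            sin.sin_port = htons(8080);

            bind(fd, (struct sockaddr *)&sin, sizeof(sin));
            setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
            listen(fd, 128);        /* ordinary accept backlog, also clamped to somaxconn */
            return fd;
    }
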
Dscif.h233 int scif_listen(scif_epd_t epd, int backlog);
Dnet.h276 int kernel_listen(struct socket *sock, int backlog);
Dsecurity.h1119 int security_socket_listen(struct socket *sock, int backlog);
1197 static inline int security_socket_listen(struct socket *sock, int backlog) in security_socket_listen() argument
Dlsm_hooks.h1535 int (*socket_listen)(struct socket *sock, int backlog);
Dnetdevice.h2566 struct napi_struct backlog; member
/linux-4.4.14/Documentation/networking/
Dx25-iface.txt82 kernel if the backlog queue is congested.
96 The probability of packet loss due to backlog congestion can be
121 when a previously congested backlog queue becomes empty again.
Dscaling.txt107 on the desired CPU’s backlog queue and waking up the CPU for processing.
132 and the packet is queued to the tail of that CPU’s backlog queue. At
134 packets have been queued to their backlog queue. The IPI wakes backlog
234 to enqueue packets onto the backlog of another CPU and to wake up that
266 CPU's backlog when a packet in this flow was last enqueued. Each backlog
279 table), the packet is enqueued onto that CPU’s backlog. If they differ,
Dgen_stats.txt26 mystruct->qstats.backlog += skb->pkt_len;
Dnf_conntrack-sysctl.txt51 will take longer for a backlog to be processed.
Dcxgb.txt144 Setting maximum backlog (# of unprocessed packets before kernel drops):
Dip-sysctl.txt176 Limit of socket listen() backlog, known in userspace as SOMAXCONN.
536 Send out syncookies when the syn backlog queue of a socket
/linux-4.4.14/drivers/infiniband/core/
Diwcm.c435 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog) in iw_cm_listen() argument
443 if (!backlog) in iw_cm_listen()
444 backlog = default_backlog; in iw_cm_listen()
446 ret = alloc_work_entries(cm_id_priv, backlog); in iw_cm_listen()
455 ret = cm_id->device->iwcm->create_listen(cm_id, backlog); in iw_cm_listen()
Ducma.c86 int backlog; member
343 if (!ctx->backlog) { in ucma_event_handler()
348 ctx->backlog--; in ucma_event_handler()
410 uevent->ctx->backlog++; in ucma_get_event()
1051 ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? in ucma_listen()
1052 cmd.backlog : max_backlog; in ucma_listen()
1053 ret = rdma_listen(ctx->cm_id, ctx->backlog); in ucma_listen()
Dcma.c213 int backlog; member
1995 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) in cma_iw_listen() argument
2012 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); in cma_iw_listen()
2060 ret = rdma_listen(id, id_priv->backlog); in cma_listen_on_dev()
2923 int rdma_listen(struct rdma_cm_id *id, int backlog) in rdma_listen() argument
2945 id_priv->backlog = backlog; in rdma_listen()
2952 ret = cma_iw_listen(id_priv, backlog); in rdma_listen()
2964 id_priv->backlog = 0; in rdma_listen()
/linux-4.4.14/drivers/staging/lustre/lnet/lnet/
Dlib-socket.c495 int backlog) in lnet_sock_listen() argument
508 rc = kernel_listen(*sockp, backlog); in lnet_sock_listen()
512 CERROR("Can't set listen backlog %d: %d\n", backlog, rc); in lnet_sock_listen()
/linux-4.4.14/drivers/infiniband/hw/nes/
Dnes_cm.h303 int backlog; member
377 int backlog; member
Dnes_cm.c2060 cm_node->listener->backlog) { in handle_syn_pkt()
2466 listener->backlog = cm_info->backlog; in mini_cm_listen()
2479 listener, listener->backlog, listener->cm_id); in mini_cm_listen()
3633 int nes_create_listen(struct iw_cm_id *cm_id, int backlog) in nes_create_listen() argument
3659 cm_info.backlog = backlog; in nes_create_listen()
/linux-4.4.14/net/dccp/
Dproto.c234 static inline int dccp_listen_start(struct sock *sk, int backlog) in dccp_listen_start() argument
242 return inet_csk_listen_start(sk, backlog); in dccp_listen_start()
915 int inet_dccp_listen(struct socket *sock, int backlog) in inet_dccp_listen() argument
939 err = dccp_listen_start(sk, backlog); in inet_dccp_listen()
943 sk->sk_max_ack_backlog = backlog; in inet_dccp_listen()
Ddccp.h318 int inet_dccp_listen(struct socket *sock, int backlog);
/linux-4.4.14/net/sunrpc/
Dxprt.c988 xprt->stat.bklog_u += xprt->backlog.qlen; in xprt_transmit()
1010 rpc_sleep_on(&xprt->backlog, task, NULL); in xprt_add_backlog()
1015 if (rpc_wake_up_next(&xprt->backlog) == NULL) in xprt_wake_up_backlog()
1027 rpc_sleep_on(&xprt->backlog, task, NULL); in xprt_throttle_congested()
1329 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); in xprt_init()
1409 rpc_destroy_wait_queue(&xprt->backlog); in xprt_destroy()
/linux-4.4.14/drivers/misc/mic/scif/
Dscif_epd.h110 int backlog; member
Dscif_epd.c145 if (ep->backlog <= ep->conreqcnt) { in scif_cnctreq()
Dscif_api.c394 int scif_listen(scif_epd_t epd, int backlog) in scif_listen() argument
420 ep->backlog = backlog; in scif_listen()
/linux-4.4.14/net/atm/
Dsvc.c281 static int svc_listen(struct socket *sock, int backlog) in svc_listen() argument
314 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; in svc_listen()
/linux-4.4.14/net/rxrpc/
Daf_rxrpc.c188 static int rxrpc_listen(struct socket *sock, int backlog) in rxrpc_listen() argument
194 _enter("%p,%d", rx, backlog); in rxrpc_listen()
209 sk->sk_max_ack_backlog = backlog; in rxrpc_listen()
/linux-4.4.14/drivers/net/wireless/p54/
Dlmac.h258 u8 backlog; member
452 u8 backlog; member
Dtxrx.c921 txhdr->backlog = priv->tx_stats[queue].len - 1; in p54_tx_80211()
/linux-4.4.14/include/uapi/rdma/
Drdma_user_cm.h208 __u32 backlog; member
/linux-4.4.14/net/llc/
Daf_llc.c505 static int llc_ui_listen(struct socket *sock, int backlog) in llc_ui_listen() argument
520 if (!(unsigned int)backlog) /* BSDism */ in llc_ui_listen()
521 backlog = 1; in llc_ui_listen()
522 sk->sk_max_ack_backlog = backlog; in llc_ui_listen()
/linux-4.4.14/net/nfc/
Dllcp_sock.c197 static int llcp_sock_listen(struct socket *sock, int backlog) in llcp_sock_listen() argument
202 pr_debug("sk %p backlog %d\n", sk, backlog); in llcp_sock_listen()
212 sk->sk_max_ack_backlog = backlog; in llcp_sock_listen()
/linux-4.4.14/net/bluetooth/rfcomm/
Dsock.c424 static int rfcomm_sock_listen(struct socket *sock, int backlog) in rfcomm_sock_listen() argument
429 BT_DBG("sk %p backlog %d", sk, backlog); in rfcomm_sock_listen()
464 sk->sk_max_ack_backlog = backlog; in rfcomm_sock_listen()
/linux-4.4.14/net/bluetooth/
Dsco.c588 static int sco_sock_listen(struct socket *sock, int backlog) in sco_sock_listen() argument
594 BT_DBG("sk %p backlog %d", sk, backlog); in sco_sock_listen()
615 sk->sk_max_ack_backlog = backlog; in sco_sock_listen()
Dl2cap_sock.c251 static int l2cap_sock_listen(struct socket *sock, int backlog) in l2cap_sock_listen() argument
257 BT_DBG("sk %p backlog %d", sk, backlog); in l2cap_sock_listen()
285 sk->sk_max_ack_backlog = backlog; in l2cap_sock_listen()
/linux-4.4.14/net/ipv4/
Daf_inet.c194 int inet_listen(struct socket *sock, int backlog) in inet_listen() argument
224 fastopen_queue_tune(sk, backlog); in inet_listen()
232 err = inet_csk_listen_start(sk, backlog); in inet_listen()
236 sk->sk_max_ack_backlog = backlog; in inet_listen()
Dinet_connection_sock.c736 int inet_csk_listen_start(struct sock *sk, int backlog) in inet_csk_listen_start() argument
743 sk->sk_max_ack_backlog = backlog; in inet_csk_listen_start()
/linux-4.4.14/security/tomoyo/
Dtomoyo.c445 static int tomoyo_socket_listen(struct socket *sock, int backlog) in tomoyo_socket_listen() argument
/linux-4.4.14/net/phonet/
Dsocket.c404 static int pn_socket_listen(struct socket *sock, int backlog) in pn_socket_listen() argument
422 sk->sk_max_ack_backlog = backlog; in pn_socket_listen()
/linux-4.4.14/net/
Dsocket.c1392 SYSCALL_DEFINE2(listen, int, fd, int, backlog) in SYSCALL_DEFINE2() argument
1401 if ((unsigned int)backlog > somaxconn) in SYSCALL_DEFINE2()
1402 backlog = somaxconn; in SYSCALL_DEFINE2()
1404 err = security_socket_listen(sock, backlog); in SYSCALL_DEFINE2()
1406 err = sock->ops->listen(sock, backlog); in SYSCALL_DEFINE2()
3175 int kernel_listen(struct socket *sock, int backlog) in kernel_listen() argument
3177 return sock->ops->listen(sock, backlog); in kernel_listen()
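
kernel_listen() is the in-kernel counterpart used by the lustre lnet and iSCSI target excerpts above. A hedged sketch of the usual sequence for creating an in-kernel listening socket (most error handling trimmed; the port and backlog values are illustrative):

    #include <linux/in.h>
    #include <linux/net.h>
    #include <net/net_namespace.h>

    static int my_kernel_listener(struct socket **sockp)
    {
            struct sockaddr_in sin = {
                    .sin_family      = AF_INET,
                    .sin_addr.s_addr = htonl(INADDR_ANY),
                    .sin_port        = htons(12345),
            };
            struct socket *sock;
            int rc;

            rc = sock_create_kern(&init_net, AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
            if (rc)
                    return rc;

            rc = kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin));
            if (!rc)
                    rc = kernel_listen(sock, 64);   /* 64 = accept backlog */
            if (rc) {
                    sock_release(sock);
                    return rc;
            }

            *sockp = sock;
            return 0;
    }
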
DKconfig294 backlog reaches netdev_max_backlog. If a few out of many active flows
/linux-4.4.14/include/linux/sunrpc/
Dxprt.h188 struct rpc_wait_queue backlog; /* waiting for slot */ member
/linux-4.4.14/drivers/md/
Dbitmap.c2299 unsigned long backlog; in backlog_store() local
2300 int rv = kstrtoul(buf, 10, &backlog); in backlog_store()
2303 if (backlog > COUNTER_MAX) in backlog_store()
2305 mddev->bitmap_info.max_write_behind = backlog; in backlog_store()
2310 __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
Diw_cxgb4.h803 int backlog; member
953 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
Ddevice.c627 ep->com.flags, ep->stid, ep->backlog, in dump_listen_ep()
640 ep->com.flags, ep->stid, ep->backlog, in dump_listen_ep()
Dcm.c3335 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) in c4iw_create_listen() argument
3356 ep->backlog = backlog; in c4iw_create_listen()
/linux-4.4.14/include/net/sctp/
Dsctp.h102 int sctp_inet_listen(struct socket *sock, int backlog);
/linux-4.4.14/net/netrom/
Daf_netrom.c401 static int nr_listen(struct socket *sock, int backlog) in nr_listen() argument
408 sk->sk_max_ack_backlog = backlog; in nr_listen()
/linux-4.4.14/drivers/target/iscsi/
Discsi_target_login.c883 int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len; in iscsit_setup_np() local
963 ret = kernel_listen(sock, backlog); in iscsit_setup_np()
/linux-4.4.14/drivers/staging/lustre/include/linux/lnet/
Dlib-lnet.h648 int lnet_sock_listen(struct socket **sockp, __u32 ip, int port, int backlog);
/linux-4.4.14/net/rose/
Daf_rose.c487 static int rose_listen(struct socket *sock, int backlog) in rose_listen() argument
498 sk->sk_max_ack_backlog = backlog; in rose_listen()
/linux-4.4.14/net/vmw_vsock/
Daf_vsock.c1322 static int vsock_listen(struct socket *sock, int backlog) in vsock_listen() argument
1349 sk->sk_max_ack_backlog = backlog; in vsock_listen()
/linux-4.4.14/net/sctp/
Dsocket.c6307 static int sctp_listen_start(struct sock *sk, int backlog) in sctp_listen_start() argument
6348 sk->sk_max_ack_backlog = backlog; in sctp_listen_start()
6367 int sctp_inet_listen(struct socket *sock, int backlog) in sctp_inet_listen() argument
6373 if (unlikely(backlog < 0)) in sctp_inet_listen()
6386 if (!backlog) { in sctp_inet_listen()
6400 sk->sk_max_ack_backlog = backlog; in sctp_inet_listen()
6402 err = sctp_listen_start(sk, backlog); in sctp_inet_listen()
/linux-4.4.14/Documentation/
Diostats.txt91 I/O completion time and the backlog that may be accumulating.
Dmd.txt318 bitmap/backlog
322 'backlog' sets a limit on the number of concurrent background
/linux-4.4.14/net/x25/
Daf_x25.c495 static int x25_listen(struct socket *sock, int backlog) in x25_listen() argument
503 sk->sk_max_ack_backlog = backlog; in x25_listen()
/linux-4.4.14/net/unix/
Daf_unix.c602 static int unix_listen(struct socket *sock, int backlog) in unix_listen() argument
618 if (backlog > sk->sk_max_ack_backlog) in unix_listen()
620 sk->sk_max_ack_backlog = backlog; in unix_listen()
/linux-4.4.14/net/ax25/
Daf_ax25.c773 static int ax25_listen(struct socket *sock, int backlog) in ax25_listen() argument
780 sk->sk_max_ack_backlog = backlog; in ax25_listen()
/linux-4.4.14/net/irda/
Daf_irda.c735 static int irda_listen(struct socket *sock, int backlog) in irda_listen() argument
747 sk->sk_max_ack_backlog = backlog; in irda_listen()
/linux-4.4.14/security/
Dsecurity.c1218 int security_socket_listen(struct socket *sock, int backlog) in security_socket_listen() argument
1220 return call_int_hook(socket_listen, 0, sock, backlog); in security_socket_listen()
/linux-4.4.14/net/decnet/
Daf_decnet.c1274 static int dn_listen(struct socket *sock, int backlog) in dn_listen() argument
1287 sk->sk_max_ack_backlog = backlog; in dn_listen()
/linux-4.4.14/net/iucv/
Daf_iucv.c904 static int iucv_sock_listen(struct socket *sock, int backlog) in iucv_sock_listen() argument
918 sk->sk_max_ack_backlog = backlog; in iucv_sock_listen()
/linux-4.4.14/kernel/
Daudit.c851 s.backlog = skb_queue_len(&audit_skb_queue); in audit_receive_msg()
/linux-4.4.14/security/selinux/
Dhooks.c4253 static int selinux_socket_listen(struct socket *sock, int backlog) in selinux_socket_listen() argument