/linux-4.1.27/drivers/crypto/ccp/ |
D | ccp-crypto-main.c | 47 struct list_head *backlog; member 89 struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog) in ccp_crypto_cmd_complete() argument 94 *backlog = NULL; in ccp_crypto_cmd_complete() 113 if (req_queue.backlog != &req_queue.cmds) { in ccp_crypto_cmd_complete() 115 if (req_queue.backlog == &crypto_cmd->entry) in ccp_crypto_cmd_complete() 116 req_queue.backlog = crypto_cmd->entry.next; in ccp_crypto_cmd_complete() 118 *backlog = container_of(req_queue.backlog, in ccp_crypto_cmd_complete() 120 req_queue.backlog = req_queue.backlog->next; in ccp_crypto_cmd_complete() 123 if (req_queue.backlog == &crypto_cmd->entry) in ccp_crypto_cmd_complete() 124 req_queue.backlog = crypto_cmd->entry.next; in ccp_crypto_cmd_complete() [all …]
|
D | ccp-dev.c | 114 list_add_tail(&cmd->entry, &ccp->backlog); in ccp_enqueue_cmd() 174 struct ccp_cmd *backlog = NULL; in ccp_dequeue_cmd() local 199 if (!list_empty(&ccp->backlog)) { in ccp_dequeue_cmd() 200 backlog = list_first_entry(&ccp->backlog, struct ccp_cmd, in ccp_dequeue_cmd() 202 list_del(&backlog->entry); in ccp_dequeue_cmd() 207 if (backlog) { in ccp_dequeue_cmd() 208 INIT_WORK(&backlog->work, ccp_do_cmd_backlog); in ccp_dequeue_cmd() 209 schedule_work(&backlog->work); in ccp_dequeue_cmd() 304 INIT_LIST_HEAD(&ccp->backlog); in ccp_alloc_struct() 513 while (!list_empty(&ccp->backlog)) { in ccp_destroy() [all …]
|
D | ccp-dev.h | 217 struct list_head backlog; member
|
/linux-4.1.27/tools/perf/ui/tui/ |
D | helpline.c | 32 static int backlog; in tui_helpline__show() local 35 ret = vscnprintf(ui_helpline__last_msg + backlog, in tui_helpline__show() 36 sizeof(ui_helpline__last_msg) - backlog, format, ap); in tui_helpline__show() 37 backlog += ret; in tui_helpline__show() 41 if (ui_helpline__last_msg[backlog - 1] == '\n') { in tui_helpline__show() 44 backlog = 0; in tui_helpline__show()
|
/linux-4.1.27/tools/perf/ui/gtk/ |
D | helpline.c | 31 static int backlog; in gtk_helpline_show() local 33 ret = vscnprintf(ui_helpline__current + backlog, in gtk_helpline_show() 34 sizeof(ui_helpline__current) - backlog, fmt, ap); in gtk_helpline_show() 35 backlog += ret; in gtk_helpline_show() 39 if (ptr && (ptr - ui_helpline__current) <= backlog) { in gtk_helpline_show() 42 backlog = 0; in gtk_helpline_show()
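
Both helpline implementations above append formatted fragments into a message buffer at offset backlog and reset the offset once the message ends in a newline. A minimal user-space sketch of that accumulate-and-flush pattern, assuming an illustrative buffer rather than the perf globals:

#include <stdarg.h>
#include <stdio.h>

static char msg_buf[512];   /* stand-in for ui_helpline__last_msg / __current */
static int  backlog;        /* bytes accumulated so far */

/* Append a formatted fragment; emit the line once it ends in '\n',
 * mirroring the accumulate-and-reset logic in the helpline code above. */
static int helpline_show(const char *fmt, ...)
{
	int avail = (int)sizeof(msg_buf) - backlog;
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = vsnprintf(msg_buf + backlog, avail, fmt, ap);
	va_end(ap);
	if (ret >= avail)            /* vscnprintf() caps this in the kernel */
		ret = avail - 1;
	backlog += ret;

	if (backlog > 0 && msg_buf[backlog - 1] == '\n') {
		fputs(msg_buf, stdout);  /* where the UI would redraw the help line */
		backlog = 0;             /* next message starts from scratch */
	}
	return ret;
}

int main(void)
{
	helpline_show("processing ");
	helpline_show("%d samples", 42);
	helpline_show(" ... done\n");    /* trailing newline triggers the flush */
	return 0;
}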
|
/linux-4.1.27/net/sched/ |
D | sch_gred.c | 40 u32 backlog; /* bytes on the virtualQ */ member 118 return sch->qstats.backlog; in gred_backlog() 120 return q->backlog; in gred_backlog() 233 q->backlog += qdisc_pkt_len(skb); in gred_enqueue() 261 q->backlog -= qdisc_pkt_len(skb); in gred_dequeue() 264 if (!sch->qstats.backlog) in gred_dequeue() 267 if (!q->backlog) in gred_dequeue() 293 q->backlog -= len; in gred_drop() 297 if (!sch->qstats.backlog) in gred_drop() 300 if (!q->backlog) in gred_drop() [all …]
|
D | sch_sfq.c | 113 unsigned int backlog; member 330 slot->backlog -= len; in sfq_drop() 397 slot->backlog = 0; /* should already be 0 anyway... */ in sfq_enqueue() 404 slot->backlog); in sfq_enqueue() 455 sch->qstats.backlog -= delta; in sfq_enqueue() 456 slot->backlog -= delta; in sfq_enqueue() 465 slot->backlog += qdisc_pkt_len(skb); in sfq_enqueue() 524 slot->backlog -= qdisc_pkt_len(skb); in sfq_dequeue() 575 slot->backlog = 0; in sfq_rehash() 605 slot->backlog); in sfq_rehash() [all …]
|
D | sch_mqprio.c | 239 sch->qstats.backlog += qdisc->qstats.backlog; in mqprio_dump() 350 qstats.backlog += qdisc->qstats.backlog; in mqprio_dump_class_stats()
|
D | sch_red.c | 67 child->qstats.backlog); in red_enqueue() 271 sch->qstats.backlog = q->qdisc->qstats.backlog; in red_dump()
|
D | sch_mq.c | 115 sch->qstats.backlog += qdisc->qstats.backlog; in mq_dump()
|
D | sch_pie.c | 119 if (sch->qstats.backlog < 2 * mtu) in drop_early() 248 int qlen = sch->qstats.backlog; /* current queue size in bytes */ in pie_process_dequeue() 313 u32 qlen = sch->qstats.backlog; /* queue size in bytes */ in calculate_probability()
|
D | sch_plug.c | 93 if (likely(sch->qstats.backlog + skb->len <= q->limit)) { in plug_enqueue()
|
D | sch_fifo.c | 24 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) in bfifo_enqueue()
|
D | sch_tbf.c | 456 sch->qstats.backlog = q->qdisc->qstats.backlog; in tbf_dump()
|
D | sch_hfsc.c | 1373 cl->qstats.backlog = cl->qdisc->qstats.backlog; in hfsc_dump_class_stats() 1568 sch->qstats.backlog = 0; in hfsc_dump_qdisc() 1571 sch->qstats.backlog += cl->qdisc->qstats.backlog; in hfsc_dump_qdisc()
|
D | sch_sfb.c | 573 sch->qstats.backlog = q->qdisc->qstats.backlog; in sfb_dump()
|
D | sch_fq_codel.c | 550 qs.backlog = q->backlogs[idx]; in fq_codel_dump_class_stats()
|
D | sch_generic.c | 537 qdisc->qstats.backlog = 0; in pfifo_fast_reset()
|
/linux-4.1.27/net/core/ |
D | gen_stats.c | 231 qstats->backlog += qcpu->backlog; in __gnet_stats_copy_queue_cpu() 247 qstats->backlog = q->backlog; in __gnet_stats_copy_queue() 282 d->tc_stats.backlog = qstats.backlog; in gnet_stats_copy_queue()
|
D | dev.c | 3262 ____napi_schedule(sd, &sd->backlog); in rps_trigger_softirq() 3361 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) { in enqueue_to_backlog() 3363 ____napi_schedule(sd, &sd->backlog); in enqueue_to_backlog() 4379 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); in process_backlog() 7496 sd->backlog.poll = process_backlog; in net_dev_init() 7497 sd->backlog.weight = weight_p; in net_dev_init()
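
process_backlog() above recovers the per-CPU softnet_data from its embedded backlog NAPI instance with container_of, after net_dev_init() wires up the poll callback. A self-contained sketch of that embed-and-recover idiom; the struct and function names below are stand-ins, not the kernel's:

#include <stddef.h>
#include <stdio.h>

/* container_of: recover the enclosing struct from a pointer to one member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct poller {                     /* stand-in for struct napi_struct    */
	int (*poll)(struct poller *p, int budget);
	int weight;
};

struct cpu_data {                   /* stand-in for struct softnet_data   */
	int cpu;
	struct poller backlog;      /* embedded, like softnet_data.backlog */
};

static int process_backlog(struct poller *p, int budget)
{
	/* Walk back from the embedded member to the per-CPU structure. */
	struct cpu_data *sd = container_of(p, struct cpu_data, backlog);

	printf("polling backlog of cpu %d, budget %d\n", sd->cpu, budget);
	return 0;
}

int main(void)
{
	struct cpu_data sd = { .cpu = 3 };

	sd.backlog.poll = process_backlog;  /* mirrors the net_dev_init() wiring */
	sd.backlog.weight = 64;
	return sd.backlog.poll(&sd.backlog, sd.backlog.weight);
}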
|
D | sock.c | 2147 int sock_no_listen(struct socket *sock, int backlog) in sock_no_listen() argument
|
/linux-4.1.27/drivers/crypto/qce/ |
D | core.c | 82 struct crypto_async_request *async_req, *backlog; in qce_handle_queue() local 97 backlog = crypto_get_backlog(&qce->queue); in qce_handle_queue() 107 if (backlog) { in qce_handle_queue() 109 backlog->complete(backlog, -EINPROGRESS); in qce_handle_queue()
|
/linux-4.1.27/include/net/ |
D | red.h | 280 unsigned int backlog) in red_calc_qavg_no_idle_time() argument 291 return v->qavg + (backlog - (v->qavg >> p->Wlog)); in red_calc_qavg_no_idle_time() 296 unsigned int backlog) in red_calc_qavg() argument 299 return red_calc_qavg_no_idle_time(p, v, backlog); in red_calc_qavg()
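
red_calc_qavg_no_idle_time() above keeps the average queue size as a fixed-point value scaled by 2^Wlog, so the EWMA qavg = qavg*(1 - W) + backlog*W (with W = 2^-Wlog) reduces to one shift, one subtract and one add. A small sketch of that update, with an assumed Wlog of 9:

#include <stdio.h>

/* Fixed-point EWMA in the style of red_calc_qavg_no_idle_time():
 * qavg is kept scaled by 2^Wlog, so the smoothed average is updated
 * without any multiplication or division. */
static unsigned long red_qavg_update(unsigned long qavg_scaled,
				     unsigned int backlog, unsigned int Wlog)
{
	return qavg_scaled + (backlog - (qavg_scaled >> Wlog));
}

int main(void)
{
	unsigned int Wlog = 9;              /* W = 1/512, assumed for the example */
	unsigned long qavg = 0;             /* scaled by 2^Wlog                   */

	for (int i = 0; i < 5; i++) {
		qavg = red_qavg_update(qavg, 3000 /* bytes queued */, Wlog);
		printf("step %d: qavg = %lu bytes\n", i, qavg >> Wlog);
	}
	return 0;
}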
|
D | codel.h | 235 sch->qstats.backlog -= qdisc_pkt_len(skb); in codel_should_drop() 241 sch->qstats.backlog <= params->mtu) { in codel_should_drop()
|
D | inet_common.h | 30 int inet_listen(struct socket *sock, int backlog);
|
D | sch_generic.h | 545 sch->qstats.backlog -= qdisc_pkt_len(skb); in qdisc_qstats_backlog_dec() 551 sch->qstats.backlog += qdisc_pkt_len(skb); in qdisc_qstats_backlog_inc() 691 sch->qstats.backlog = 0; in qdisc_reset_queue()
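
The helpers above keep qstats.backlog as a running byte count, incremented and decremented by the packet length and zeroed on reset; bfifo and plug reuse the same counter to enforce a byte limit on enqueue. A compact sketch of that accounting, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* Byte-counted backlog accounting, as the qdisc helpers above do with
 * qstats.backlog and qdisc_pkt_len().  Names here are illustrative. */
struct byte_fifo {
	unsigned int backlog;   /* bytes currently queued */
	unsigned int limit;     /* byte limit, as in bfifo_enqueue() */
};

static bool fifo_enqueue(struct byte_fifo *q, unsigned int pkt_len)
{
	if (q->backlog + pkt_len > q->limit)
		return false;            /* would exceed the byte limit   */
	q->backlog += pkt_len;           /* qdisc_qstats_backlog_inc()    */
	return true;
}

static void fifo_dequeue(struct byte_fifo *q, unsigned int pkt_len)
{
	q->backlog -= pkt_len;           /* qdisc_qstats_backlog_dec()    */
}

static void fifo_reset(struct byte_fifo *q)
{
	q->backlog = 0;                  /* qdisc_reset_queue()           */
}

int main(void)
{
	struct byte_fifo q = { .limit = 3000 };

	printf("enq 1500: %d\n", fifo_enqueue(&q, 1500));
	printf("enq 1500: %d\n", fifo_enqueue(&q, 1500));
	printf("enq 1500: %d (over limit)\n", fifo_enqueue(&q, 1500));
	fifo_dequeue(&q, 1500);
	fifo_reset(&q);
	return 0;
}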
|
/linux-4.1.27/drivers/staging/ozwpan/ |
D | ozpd.c | 524 void oz_send_queued_frames(struct oz_pd *pd, int backlog) in oz_send_queued_frames() argument 527 backlog++; in oz_send_queued_frames() 532 backlog += pd->nb_queued_isoc_frames; in oz_send_queued_frames() 533 if (backlog <= 0) in oz_send_queued_frames() 535 if (backlog > OZ_MAX_SUBMITTED_ISOC) in oz_send_queued_frames() 536 backlog = OZ_MAX_SUBMITTED_ISOC; in oz_send_queued_frames() 540 if ((backlog <= 0) && (pd->isoc_sent == 0)) in oz_send_queued_frames() 545 if (backlog <= 0) in oz_send_queued_frames() 550 while (backlog--) { in oz_send_queued_frames() 551 if (oz_send_next_queued_frame(pd, backlog) < 0) in oz_send_queued_frames()
|
D | ozpd.h | 122 void oz_send_queued_frames(struct oz_pd *pd, int backlog);
|
D | ozproto.c | 386 int backlog = pd->nb_queued_frames; in oz_rx_frame() local 390 oz_send_queued_frames(pd, backlog); in oz_rx_frame()
|
/linux-4.1.27/include/rdma/ |
D | iw_cm.h | 122 int backlog); 180 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
|
D | rdma_cm.h | 279 int rdma_listen(struct rdma_cm_id *id, int backlog);
|
/linux-4.1.27/drivers/crypto/ |
D | mxs-dcp.c | 347 struct crypto_async_request *backlog; in dcp_chan_thread_aes() local 356 backlog = crypto_get_backlog(&sdcp->queue[chan]); in dcp_chan_thread_aes() 360 if (backlog) in dcp_chan_thread_aes() 361 backlog->complete(backlog, -EINPROGRESS); in dcp_chan_thread_aes() 640 struct crypto_async_request *backlog; in dcp_chan_thread_sha() local 652 backlog = crypto_get_backlog(&sdcp->queue[chan]); in dcp_chan_thread_sha() 656 if (backlog) in dcp_chan_thread_sha() 657 backlog->complete(backlog, -EINPROGRESS); in dcp_chan_thread_sha()
|
D | s5p-sss.c | 484 struct crypto_async_request *async_req, *backlog; in s5p_tasklet_cb() local 489 backlog = crypto_get_backlog(&dev->queue); in s5p_tasklet_cb() 499 if (backlog) in s5p_tasklet_cb() 500 backlog->complete(backlog, -EINPROGRESS); in s5p_tasklet_cb()
|
D | bfin_crc.c | 302 struct crypto_async_request *async_req, *backlog; in bfin_crypto_crc_handle_queue() local 318 backlog = crypto_get_backlog(&crc->queue); in bfin_crypto_crc_handle_queue() 327 if (backlog) in bfin_crypto_crc_handle_queue() 328 backlog->complete(backlog, -EINPROGRESS); in bfin_crypto_crc_handle_queue()
|
D | mv_cesa.c | 598 struct crypto_async_request *backlog; in queue_manag() local 607 backlog = crypto_get_backlog(&cpg->queue); in queue_manag() 616 if (backlog) { in queue_manag() 617 backlog->complete(backlog, -EINPROGRESS); in queue_manag() 618 backlog = NULL; in queue_manag()
|
D | img-hash.c | 497 struct crypto_async_request *async_req, *backlog; in img_hash_handle_queue() local 512 backlog = crypto_get_backlog(&hdev->queue); in img_hash_handle_queue() 522 if (backlog) in img_hash_handle_queue() 523 backlog->complete(backlog, -EINPROGRESS); in img_hash_handle_queue()
|
D | omap-des.c | 592 struct crypto_async_request *async_req, *backlog; in omap_des_handle_queue() local 605 backlog = crypto_get_backlog(&dd->queue); in omap_des_handle_queue() 614 if (backlog) in omap_des_handle_queue() 615 backlog->complete(backlog, -EINPROGRESS); in omap_des_handle_queue()
|
D | omap-aes.c | 610 struct crypto_async_request *async_req, *backlog; in omap_aes_handle_queue() local 623 backlog = crypto_get_backlog(&dd->queue); in omap_aes_handle_queue() 632 if (backlog) in omap_aes_handle_queue() 633 backlog->complete(backlog, -EINPROGRESS); in omap_aes_handle_queue()
|
D | atmel-tdes.c | 591 struct crypto_async_request *async_req, *backlog; in atmel_tdes_handle_queue() local 604 backlog = crypto_get_backlog(&dd->queue); in atmel_tdes_handle_queue() 613 if (backlog) in atmel_tdes_handle_queue() 614 backlog->complete(backlog, -EINPROGRESS); in atmel_tdes_handle_queue()
|
D | atmel-sha.c | 832 struct crypto_async_request *async_req, *backlog; in atmel_sha_handle_queue() local 846 backlog = crypto_get_backlog(&dd->queue); in atmel_sha_handle_queue() 856 if (backlog) in atmel_sha_handle_queue() 857 backlog->complete(backlog, -EINPROGRESS); in atmel_sha_handle_queue()
|
D | atmel-aes.c | 565 struct crypto_async_request *async_req, *backlog; in atmel_aes_handle_queue() local 578 backlog = crypto_get_backlog(&dd->queue); in atmel_aes_handle_queue() 587 if (backlog) in atmel_aes_handle_queue() 588 backlog->complete(backlog, -EINPROGRESS); in atmel_aes_handle_queue()
|
D | sahara.c | 1103 struct crypto_async_request *backlog; in sahara_queue_manage() local 1110 backlog = crypto_get_backlog(&dev->queue); in sahara_queue_manage() 1114 if (backlog) in sahara_queue_manage() 1115 backlog->complete(backlog, -EINPROGRESS); in sahara_queue_manage()
|
D | omap-sham.c | 1010 struct crypto_async_request *async_req, *backlog; in omap_sham_handle_queue() local 1022 backlog = crypto_get_backlog(&dd->queue); in omap_sham_handle_queue() 1031 if (backlog) in omap_sham_handle_queue() 1032 backlog->complete(backlog, -EINPROGRESS); in omap_sham_handle_queue()
|
D | hifn_795x.c | 2139 struct crypto_async_request *async_req, *backlog; in hifn_process_queue() local 2146 backlog = crypto_get_backlog(&dev->queue); in hifn_process_queue() 2153 if (backlog) in hifn_process_queue() 2154 backlog->complete(backlog, -EINPROGRESS); in hifn_process_queue()
|
/linux-4.1.27/drivers/infiniband/hw/cxgb3/ |
D | iwch_cm.h | 172 int backlog; member 218 int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
|
D | iwch_cm.c | 1986 int iwch_create_listen(struct iw_cm_id *cm_id, int backlog) in iwch_create_listen() argument 2010 ep->backlog = backlog; in iwch_create_listen()
|
/linux-4.1.27/include/uapi/linux/ |
D | gen_stats.h | 61 __u32 backlog; member
|
D | pkt_sched.h | 41 __u32 backlog; member 281 __u32 backlog; member
|
D | audit.h | 414 __u32 backlog; /* messages waiting in queue */ member
|
/linux-4.1.27/drivers/infiniband/hw/amso1100/ |
D | c2_cm.c | 135 int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog) in c2_llp_service_create() argument 166 wr.backlog = cpu_to_be32(backlog); in c2_llp_service_create()
|
D | c2_provider.c | 619 static int c2_service_create(struct iw_cm_id *cm_id, int backlog) in c2_service_create() argument 624 err = c2_llp_service_create(cm_id, backlog); in c2_service_create()
|
D | c2.h | 527 extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
|
D | c2_wr.h | 1282 __be32 backlog; /* tradional tcp listen bl */ member
|
/linux-4.1.27/net/tipc/ |
D | link.c | 386 lim = l->window + l->backlog[imp].limit; in link_prepare_wakeup() 388 if ((pnd[imp] + l->backlog[imp].len) >= lim) in link_prepare_wakeup() 410 l->backlog[TIPC_LOW_IMPORTANCE].len = 0; in tipc_link_purge_backlog() 411 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0; in tipc_link_purge_backlog() 412 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0; in tipc_link_purge_backlog() 413 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0; in tipc_link_purge_backlog() 414 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0; in tipc_link_purge_backlog() 715 if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit)) in __tipc_link_xmit() 747 link->backlog[imp].len++; in __tipc_link_xmit() 887 link->backlog[msg_importance(msg)].len--; in tipc_link_push_packets() [all …]
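
The TIPC link code above keeps a separate backlog length and limit per message importance level and refuses to queue once len reaches limit. A minimal sketch of that per-class admission check; the level names and limits are illustrative, not TIPC's actual values:

#include <stdbool.h>
#include <stdio.h>

/* Per-importance backlog accounting in the spirit of the tipc_link code
 * above: each importance level carries its own length and limit. */
enum { LOW, MEDIUM, HIGH, CRITICAL, SYSTEM, LEVELS };

struct backlog_class {
	unsigned int len;
	unsigned int limit;
};

static struct backlog_class backlog[LEVELS] = {
	[LOW] = { .limit = 50 },      [MEDIUM] = { .limit = 100 },
	[HIGH] = { .limit = 150 },    [CRITICAL] = { .limit = 200 },
	[SYSTEM] = { .limit = 250 },
};

/* Admit a message at importance level 'imp', or report congestion. */
static bool backlog_admit(int imp)
{
	if (backlog[imp].len >= backlog[imp].limit)
		return false;           /* caller must back off and retry */
	backlog[imp].len++;
	return true;
}

static void backlog_release(int imp)
{
	backlog[imp].len--;             /* one message pushed to the wire */
}

int main(void)
{
	printf("admit LOW: %d\n", backlog_admit(LOW));
	backlog_release(LOW);
	return 0;
}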
|
D | link.h | 187 } backlog[5]; member
|
/linux-4.1.27/include/crypto/ |
D | algapi.h | 70 struct list_head *backlog; member 339 return queue->backlog == &queue->list ? NULL : in crypto_get_backlog() 340 container_of(queue->backlog, struct crypto_async_request, list); in crypto_get_backlog()
|
/linux-4.1.27/crypto/ |
D | algapi.c | 854 queue->backlog = &queue->list; in crypto_init_queue() 869 if (queue->backlog == &queue->list) in crypto_enqueue_request() 870 queue->backlog = &request->list; in crypto_enqueue_request() 890 if (queue->backlog != &queue->list) in __crypto_dequeue_request() 891 queue->backlog = queue->backlog->next; in __crypto_dequeue_request()
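
Together with crypto_get_backlog() in algapi.h above, this is the core backlog bookkeeping: queue->backlog points at the first request accepted beyond max_qlen, or back at the list head when nothing is backlogged, and each dequeue advances it by one so exactly max_qlen requests stay ahead of it. A user-space sketch of the same pointer discipline on a circular doubly-linked list (simplified types, no locking, and -1 standing in for -EBUSY):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

struct request { struct list_head list; int id; };

/* Minimal model of crypto_queue: 'backlog' marks the first request
 * accepted beyond max_qlen, or the list head when none are. */
struct queue {
	struct list_head list;
	struct list_head *backlog;
	unsigned int qlen, max_qlen;
};

static void queue_init(struct queue *q, unsigned int max)
{
	list_init(&q->list);
	q->backlog = &q->list;          /* as in crypto_init_queue()      */
	q->qlen = 0;
	q->max_qlen = max;
}

static int enqueue(struct queue *q, struct request *r)
{
	int busy = q->qlen >= q->max_qlen;

	if (busy && q->backlog == &q->list)
		q->backlog = &r->list;  /* first backlogged request       */
	list_add_tail(&r->list, &q->list);
	q->qlen++;
	return busy ? -1 : 0;           /* -1 plays the role of -EBUSY    */
}

static struct request *dequeue(struct queue *q)
{
	struct list_head *n = q->list.next;

	if (n == &q->list)
		return NULL;            /* empty */
	if (q->backlog != &q->list)
		q->backlog = q->backlog->next;  /* __crypto_dequeue_request() */
	list_del(n);
	q->qlen--;
	return container_of(n, struct request, list);
}

/* crypto_get_backlog() equivalent: NULL when nothing is backlogged. */
static struct request *get_backlog(struct queue *q)
{
	return q->backlog == &q->list ? NULL :
	       container_of(q->backlog, struct request, list);
}

int main(void)
{
	struct queue q;
	struct request r[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	queue_init(&q, 2);
	for (int i = 0; i < 3; i++)
		printf("enqueue %d -> %d\n", i, enqueue(&q, &r[i]));
	struct request *b = get_backlog(&q);
	printf("backlogged: %d\n", b ? b->id : -1);   /* request 2 */
	dequeue(&q);                    /* frees a slot; backlog advances */
	return 0;
}

The drivers listed under drivers/crypto/ follow the matching consumer pattern: fetch the backlogged request before dequeuing, then signal it with complete(-EINPROGRESS) so its submitter knows it has been promoted into the queue proper.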
|
D | mcryptd.c | 156 struct crypto_async_request *req, *backlog; in mcryptd_queue_worker() local 173 backlog = crypto_get_backlog(&cpu_queue->queue); in mcryptd_queue_worker() 183 if (backlog) in mcryptd_queue_worker() 184 backlog->complete(backlog, -EINPROGRESS); in mcryptd_queue_worker()
|
D | cryptd.c | 137 struct crypto_async_request *req, *backlog; in cryptd_queue_worker() local 148 backlog = crypto_get_backlog(&cpu_queue->queue); in cryptd_queue_worker() 156 if (backlog) in cryptd_queue_worker() 157 backlog->complete(backlog, -EINPROGRESS); in cryptd_queue_worker()
|
/linux-4.1.27/include/linux/ |
D | tcp.h | 379 static inline int fastopen_init_queue(struct sock *sk, int backlog) in fastopen_init_queue() argument 394 queue->fastopenq->max_qlen = backlog; in fastopen_init_queue()
|
D | net.h | 269 int kernel_listen(struct socket *sock, int backlog);
|
D | security.h | 1686 int (*socket_listen) (struct socket *sock, int backlog); 2700 int security_socket_listen(struct socket *sock, int backlog); 2778 static inline int security_socket_listen(struct socket *sock, int backlog) in security_socket_listen() argument
|
D | netdevice.h | 2471 struct napi_struct backlog; member
|
/linux-4.1.27/drivers/atm/ |
D | zatm.h | 54 struct sk_buff_head backlog; /* list of buffers waiting for ring */ member
|
D | eni.h | 49 struct sk_buff_head backlog; /* queue of waiting TX buffers */ member
|
D | zatm.c | 731 while ((skb = skb_dequeue(&zatm_vcc->backlog))) in dequeue_tx() 733 skb_queue_head(&zatm_vcc->backlog,skb); in dequeue_tx() 880 if (skb_peek(&zatm_vcc->backlog)) { in close_tx() 883 wait_event(zatm_vcc->tx_wait, !skb_peek(&zatm_vcc->backlog)); in close_tx() 964 skb_queue_head_init(&zatm_vcc->backlog); in open_tx_first() 1544 skb_queue_tail(&ZATM_VCC(vcc)->backlog,skb); in zatm_send()
|
D | lanai.c | 243 struct sk_buff_head backlog; member 777 while ((skb = skb_dequeue(&lvcc->tx.backlog)) != NULL) in lanai_shutdown_tx_vci() 1153 return !skb_queue_empty(&lvcc->tx.backlog); in vcc_is_backlogged() 1312 skb = skb_dequeue(&lvcc->tx.backlog); in vcc_tx_unqueue_aal5() 1318 skb_queue_head(&lvcc->tx.backlog, skb); in vcc_tx_unqueue_aal5() 1344 skb_queue_tail(&lvcc->tx.backlog, skb); in vcc_tx_aal5() 1474 skb_queue_head_init(&lvcc->tx.backlog); in new_lanai_vcc()
|
D | eni.c | 1193 while ((skb = skb_dequeue(&tx->backlog))) { in poll_tx() 1197 skb_queue_head(&tx->backlog,skb); in poll_tx() 1332 skb_queue_head_init(&tx->backlog); in reserve_or_set_tx() 1407 txing = skb_peek(&eni_vcc->tx->backlog) || eni_vcc->txing; in close_tx() 2077 skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb); in eni_send() 2170 skb_queue_len(&tx->backlog)); in eni_proc_read()
|
/linux-4.1.27/Documentation/networking/ |
D | x25-iface.txt | 82 kernel if the backlog queue is congested. 96 The probability of packet loss due to backlog congestion can be 121 when a previously congested backlog queue becomes empty again.
|
D | scaling.txt | 107 on the desired CPU’s backlog queue and waking up the CPU for processing. 132 and the packet is queued to the tail of that CPU’s backlog queue. At 134 packets have been queued to their backlog queue. The IPI wakes backlog 234 to enqueue packets onto the backlog of another CPU and to wake up that 266 CPU's backlog when a packet in this flow was last enqueued. Each backlog 279 table), the packet is enqueued onto that CPU’s backlog. If they differ,
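
The RPS text above describes hashing each flow and enqueuing the packet on the selected CPU's backlog queue. A toy sketch of that steering decision; the hash function and CPU map below are illustrative only, not the kernel's implementation:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Toy flow hash over the 4-tuple; the kernel normally uses the NIC- or
 * software-computed skb hash instead. */
static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
			  uint16_t sport, uint16_t dport)
{
	uint32_t h = saddr ^ daddr ^ ((uint32_t)sport << 16 | dport);

	h ^= h >> 16;
	h *= 0x45d9f3b;
	h ^= h >> 16;
	return h;
}

/* Pick the CPU whose backlog queue should receive this packet, in the
 * spirit of consulting a receive queue's rps_map. */
static int rps_select_cpu(uint32_t hash, const int *cpu_map, int map_len)
{
	return cpu_map[hash % map_len];
}

int main(void)
{
	int cpu_map[NR_CPUS] = { 0, 1, 2, 3 };   /* eligible CPUs */
	uint32_t h = flow_hash(0x0a000001, 0x0a000002, 443, 51512);

	printf("flow steered to CPU %d's backlog\n",
	       rps_select_cpu(h, cpu_map, NR_CPUS));
	return 0;
}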
|
D | gen_stats.txt | 26 mystruct->qstats.backlog += skb->pkt_len;
|
D | nf_conntrack-sysctl.txt | 51 will take longer for a backlog to be processed.
|
D | cxgb.txt | 144 Setting maximum backlog (# of unprocessed packets before kernel drops):
|
D | ip-sysctl.txt | 176 Limit of socket listen() backlog, known in userspace as SOMAXCONN. 510 Send out syncookies when the syn backlog queue of a socket
|
/linux-4.1.27/drivers/infiniband/core/ |
D | iwcm.c | 435 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog) in iw_cm_listen() argument 443 if (!backlog) in iw_cm_listen() 444 backlog = default_backlog; in iw_cm_listen() 446 ret = alloc_work_entries(cm_id_priv, backlog); in iw_cm_listen() 455 ret = cm_id->device->iwcm->create_listen(cm_id, backlog); in iw_cm_listen()
|
D | ucma.c | 84 int backlog; member 268 if (!ctx->backlog) { in ucma_event_handler() 273 ctx->backlog--; in ucma_event_handler() 328 uevent->ctx->backlog++; in ucma_get_event() 963 ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? in ucma_listen() 964 cmd.backlog : max_backlog; in ucma_listen() 965 ret = rdma_listen(ctx->cm_id, ctx->backlog); in ucma_listen()
|
D | cma.c | 131 int backlog; member 1602 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) in cma_iw_listen() argument 1618 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); in cma_iw_listen() 1666 ret = rdma_listen(id, id_priv->backlog); in cma_listen_on_dev() 2533 int rdma_listen(struct rdma_cm_id *id, int backlog) in rdma_listen() argument 2555 id_priv->backlog = backlog; in rdma_listen() 2564 ret = cma_iw_listen(id_priv, backlog); in rdma_listen() 2577 id_priv->backlog = 0; in rdma_listen()
|
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/linux/ |
D | linux-tcpip.c | 503 __u32 local_ip, int local_port, int backlog) in libcfs_sock_listen() argument 516 rc = (*sockp)->ops->listen(*sockp, backlog); in libcfs_sock_listen() 520 CERROR("Can't set listen backlog %d: %d\n", backlog, rc); in libcfs_sock_listen()
|
/linux-4.1.27/drivers/infiniband/hw/nes/ |
D | nes_cm.h | 303 int backlog; member 375 int backlog; member
|
D | nes_cm.c | 2057 cm_node->listener->backlog) { in handle_syn_pkt() 2463 listener->backlog = cm_info->backlog; in mini_cm_listen() 2476 listener, listener->backlog, listener->cm_id); in mini_cm_listen() 3626 int nes_create_listen(struct iw_cm_id *cm_id, int backlog) in nes_create_listen() argument 3652 cm_info.backlog = backlog; in nes_create_listen()
|
/linux-4.1.27/net/dccp/ |
D | proto.c | 234 static inline int dccp_listen_start(struct sock *sk, int backlog) in dccp_listen_start() argument 242 return inet_csk_listen_start(sk, backlog); in dccp_listen_start() 916 int inet_dccp_listen(struct socket *sock, int backlog) in inet_dccp_listen() argument 940 err = dccp_listen_start(sk, backlog); in inet_dccp_listen() 944 sk->sk_max_ack_backlog = backlog; in inet_dccp_listen()
|
D | dccp.h | 316 int inet_dccp_listen(struct socket *sock, int backlog);
|
/linux-4.1.27/net/sunrpc/ |
D | xprt.c | 984 xprt->stat.bklog_u += xprt->backlog.qlen; in xprt_transmit() 1006 rpc_sleep_on(&xprt->backlog, task, NULL); in xprt_add_backlog() 1011 if (rpc_wake_up_next(&xprt->backlog) == NULL) in xprt_wake_up_backlog() 1023 rpc_sleep_on(&xprt->backlog, task, NULL); in xprt_throttle_congested() 1324 rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); in xprt_init() 1404 rpc_destroy_wait_queue(&xprt->backlog); in xprt_destroy()
|
/linux-4.1.27/drivers/staging/lustre/include/linux/libcfs/ |
D | libcfs.h | 95 int libcfs_sock_listen(struct socket **sockp, __u32 ip, int port, int backlog);
|
/linux-4.1.27/net/rxrpc/ |
D | af_rxrpc.c | 188 static int rxrpc_listen(struct socket *sock, int backlog) in rxrpc_listen() argument 194 _enter("%p,%d", rx, backlog); in rxrpc_listen() 209 sk->sk_max_ack_backlog = backlog; in rxrpc_listen()
|
/linux-4.1.27/net/atm/ |
D | svc.c | 281 static int svc_listen(struct socket *sock, int backlog) in svc_listen() argument 314 sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; in svc_listen()
|
/linux-4.1.27/drivers/net/wireless/p54/ |
D | lmac.h | 258 u8 backlog; member 452 u8 backlog; member
|
D | txrx.c | 921 txhdr->backlog = priv->tx_stats[queue].len - 1; in p54_tx_80211()
|
/linux-4.1.27/include/uapi/rdma/ |
D | rdma_user_cm.h | 208 __u32 backlog; member
|
/linux-4.1.27/net/llc/ |
D | af_llc.c | 505 static int llc_ui_listen(struct socket *sock, int backlog) in llc_ui_listen() argument 520 if (!(unsigned int)backlog) /* BSDism */ in llc_ui_listen() 521 backlog = 1; in llc_ui_listen() 522 sk->sk_max_ack_backlog = backlog; in llc_ui_listen()
|
/linux-4.1.27/net/nfc/ |
D | llcp_sock.c | 197 static int llcp_sock_listen(struct socket *sock, int backlog) in llcp_sock_listen() argument 202 pr_debug("sk %p backlog %d\n", sk, backlog); in llcp_sock_listen() 212 sk->sk_max_ack_backlog = backlog; in llcp_sock_listen()
|
/linux-4.1.27/net/bluetooth/rfcomm/ |
D | sock.c | 420 static int rfcomm_sock_listen(struct socket *sock, int backlog) in rfcomm_sock_listen() argument 425 BT_DBG("sk %p backlog %d", sk, backlog); in rfcomm_sock_listen() 460 sk->sk_max_ack_backlog = backlog; in rfcomm_sock_listen()
|
/linux-4.1.27/net/ipv4/ |
D | af_inet.c | 192 int inet_listen(struct socket *sock, int backlog) in inet_listen() argument 222 err = fastopen_init_queue(sk, backlog); in inet_listen() 234 err = inet_csk_listen_start(sk, backlog); in inet_listen() 238 sk->sk_max_ack_backlog = backlog; in inet_listen()
|
/linux-4.1.27/security/tomoyo/ |
D | tomoyo.c | 451 static int tomoyo_socket_listen(struct socket *sock, int backlog) in tomoyo_socket_listen() argument
|
/linux-4.1.27/net/bluetooth/ |
D | sco.c | 582 static int sco_sock_listen(struct socket *sock, int backlog) in sco_sock_listen() argument 588 BT_DBG("sk %p backlog %d", sk, backlog); in sco_sock_listen() 609 sk->sk_max_ack_backlog = backlog; in sco_sock_listen()
|
D | l2cap_sock.c | 251 static int l2cap_sock_listen(struct socket *sock, int backlog) in l2cap_sock_listen() argument 257 BT_DBG("sk %p backlog %d", sk, backlog); in l2cap_sock_listen() 285 sk->sk_max_ack_backlog = backlog; in l2cap_sock_listen()
|
/linux-4.1.27/include/linux/sunrpc/ |
D | xprt.h | 176 struct rpc_wait_queue backlog; /* waiting for slot */ member
|
/linux-4.1.27/net/phonet/ |
D | socket.c | 404 static int pn_socket_listen(struct socket *sock, int backlog) in pn_socket_listen() argument 422 sk->sk_max_ack_backlog = backlog; in pn_socket_listen()
|
/linux-4.1.27/net/ |
D | socket.c | 1401 SYSCALL_DEFINE2(listen, int, fd, int, backlog) in SYSCALL_DEFINE2() argument 1410 if ((unsigned int)backlog > somaxconn) in SYSCALL_DEFINE2() 1411 backlog = somaxconn; in SYSCALL_DEFINE2() 1413 err = security_socket_listen(sock, backlog); in SYSCALL_DEFINE2() 1415 err = sock->ops->listen(sock, backlog); in SYSCALL_DEFINE2() 3184 int kernel_listen(struct socket *sock, int backlog) in kernel_listen() argument 3186 return sock->ops->listen(sock, backlog); in kernel_listen()
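
SYSCALL_DEFINE2(listen) above clamps a too-large backlog to somaxconn, runs the LSM hook, and hands the value to the protocol's listen op. Seen from user space, the argument is simply an upper bound on the accept queue; a minimal example (the port number is arbitrary):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(8080);           /* arbitrary example port */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}
	/*
	 * The second argument is the requested accept-queue backlog; the
	 * kernel silently clamps anything above net.core.somaxconn, as
	 * seen in SYSCALL_DEFINE2(listen) above.
	 */
	if (listen(fd, 4096) < 0) {
		perror("listen");
		return 1;
	}
	printf("listening with requested backlog 4096\n");
	close(fd);
	return 0;
}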
|
D | Kconfig | 290 backlog reaches netdev_max_backlog. If a few out of many active flows
|
/linux-4.1.27/drivers/md/ |
D | bitmap.c | 2304 unsigned long backlog; in backlog_store() local 2305 int rv = kstrtoul(buf, 10, &backlog); in backlog_store() 2308 if (backlog > COUNTER_MAX) in backlog_store() 2310 mddev->bitmap_info.max_write_behind = backlog; in backlog_store() 2315 __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
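
backlog_store() above parses the decimal sysfs input with kstrtoul, rejects values above COUNTER_MAX, and stores the result as max_write_behind. A user-space sketch of the same parse-and-bound step; BACKLOG_MAX here is an illustrative limit, not md's COUNTER_MAX:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define BACKLOG_MAX ((1UL << 14) - 1)   /* illustrative upper bound */

/* Parse a decimal sysfs-style value and bound it, in the spirit of
 * backlog_store() above.  Returns 0 on success, -EINVAL otherwise. */
static int parse_backlog(const char *buf, unsigned long *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(buf, &end, 10);
	if (errno || end == buf || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	if (val > BACKLOG_MAX)
		return -EINVAL;
	*out = val;
	return 0;
}

int main(void)
{
	unsigned long backlog;

	if (parse_backlog("256\n", &backlog) == 0)
		printf("max_write_behind set to %lu\n", backlog);
	if (parse_backlog("999999999", &backlog) != 0)
		printf("rejected: above limit\n");
	return 0;
}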
|
/linux-4.1.27/drivers/infiniband/hw/cxgb4/ |
D | iw_cxgb4.h | 815 int backlog; member 965 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
|
D | device.c | 627 ep->com.flags, ep->stid, ep->backlog, in dump_listen_ep() 640 ep->com.flags, ep->stid, ep->backlog, in dump_listen_ep()
|
D | cm.c | 3228 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) in c4iw_create_listen() argument 3249 ep->backlog = backlog; in c4iw_create_listen()
|
/linux-4.1.27/include/net/sctp/ |
D | sctp.h | 102 int sctp_inet_listen(struct socket *sock, int backlog);
|
/linux-4.1.27/drivers/target/iscsi/ |
D | iscsi_target_login.c | 939 int backlog = ISCSIT_TCP_BACKLOG, ret, opt = 0, len; in iscsit_setup_np() local 1019 ret = kernel_listen(sock, backlog); in iscsit_setup_np()
|
/linux-4.1.27/net/netrom/ |
D | af_netrom.c | 401 static int nr_listen(struct socket *sock, int backlog) in nr_listen() argument 408 sk->sk_max_ack_backlog = backlog; in nr_listen()
|
/linux-4.1.27/security/ |
D | security.c | 1191 int security_socket_listen(struct socket *sock, int backlog) in security_socket_listen() argument 1193 return security_ops->socket_listen(sock, backlog); in security_socket_listen()
|
D | capability.c | 633 static int cap_socket_listen(struct socket *sock, int backlog) in cap_socket_listen() argument
|
/linux-4.1.27/net/rose/ |
D | af_rose.c | 486 static int rose_listen(struct socket *sock, int backlog) in rose_listen() argument 497 sk->sk_max_ack_backlog = backlog; in rose_listen()
|
/linux-4.1.27/net/vmw_vsock/ |
D | af_vsock.c | 1322 static int vsock_listen(struct socket *sock, int backlog) in vsock_listen() argument 1349 sk->sk_max_ack_backlog = backlog; in vsock_listen()
|
/linux-4.1.27/net/sctp/ |
D | socket.c | 6320 static int sctp_listen_start(struct sock *sk, int backlog) in sctp_listen_start() argument 6361 sk->sk_max_ack_backlog = backlog; in sctp_listen_start() 6380 int sctp_inet_listen(struct socket *sock, int backlog) in sctp_inet_listen() argument 6386 if (unlikely(backlog < 0)) in sctp_inet_listen() 6399 if (!backlog) { in sctp_inet_listen() 6413 sk->sk_max_ack_backlog = backlog; in sctp_inet_listen() 6415 err = sctp_listen_start(sk, backlog); in sctp_inet_listen()
|
/linux-4.1.27/net/unix/ |
D | af_unix.c | 590 static int unix_listen(struct socket *sock, int backlog) in unix_listen() argument 606 if (backlog > sk->sk_max_ack_backlog) in unix_listen() 608 sk->sk_max_ack_backlog = backlog; in unix_listen()
|
/linux-4.1.27/Documentation/ |
D | iostats.txt | 91 I/O completion time and the backlog that may be accumulating.
|
D | md.txt | 318 bitmap/backlog 322 'backlog' sets a limit on the number of concurrent background
|
/linux-4.1.27/net/x25/ |
D | af_x25.c | 495 static int x25_listen(struct socket *sock, int backlog) in x25_listen() argument 503 sk->sk_max_ack_backlog = backlog; in x25_listen()
|
/linux-4.1.27/net/ax25/ |
D | af_ax25.c | 774 static int ax25_listen(struct socket *sock, int backlog) in ax25_listen() argument 781 sk->sk_max_ack_backlog = backlog; in ax25_listen()
|
/linux-4.1.27/net/irda/ |
D | af_irda.c | 735 static int irda_listen(struct socket *sock, int backlog) in irda_listen() argument 747 sk->sk_max_ack_backlog = backlog; in irda_listen()
|
/linux-4.1.27/net/decnet/ |
D | af_decnet.c | 1274 static int dn_listen(struct socket *sock, int backlog) in dn_listen() argument 1287 sk->sk_max_ack_backlog = backlog; in dn_listen()
|
/linux-4.1.27/net/iucv/ |
D | af_iucv.c | 905 static int iucv_sock_listen(struct socket *sock, int backlog) in iucv_sock_listen() argument 919 sk->sk_max_ack_backlog = backlog; in iucv_sock_listen()
|
/linux-4.1.27/kernel/ |
D | audit.c | 837 s.backlog = skb_queue_len(&audit_skb_queue); in audit_receive_msg()
|
/linux-4.1.27/security/selinux/ |
D | hooks.c | 4237 static int selinux_socket_listen(struct socket *sock, int backlog) in selinux_socket_listen() argument
|