/linux-4.4.14/drivers/infiniband/hw/qib/
qib_sdma.c
  518   tx->txreq.start_idx = 0;  in complete_sdma_err_req()
  519   tx->txreq.next_descq_idx = 0;  in complete_sdma_err_req()
  520   list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);  in complete_sdma_err_req()
  555   if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {  in qib_sdma_verbs_send()
  565   make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);  in qib_sdma_verbs_send()
  568   if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)  in qib_sdma_verbs_send()
  584   tx->txreq.start_idx = tail;  in qib_sdma_verbs_send()
  605   if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)  in qib_sdma_verbs_send()
  643   if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)  in qib_sdma_verbs_send()
  645   if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)  in qib_sdma_verbs_send()
  [all …]
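The qib_sdma_verbs_send() hits show the usual SDMA admission pattern: refuse the request when the descriptor ring lacks room for its scatter/gather count (line 555), then record where its descriptors start and end and apply per-request flag bits. Below is a minimal sketch of that admission check; the ring type, the ring_free_count() helper, and the field names are hypothetical stand-ins, not the qib API.

    /*
     * Hedged sketch (hypothetical names, not the qib code): admit a request
     * only when the descriptor ring has room for every fragment it needs.
     * Assumes the slot count is a power of two.
     */
    #include <linux/errno.h>
    #include <linux/types.h>

    struct my_ring  { u16 head, tail, cnt; };  /* cnt = total slots */
    struct my_txreq { u16 sg_count, start_idx, next_descq_idx; };

    static u16 ring_free_count(const struct my_ring *r)
    {
        /* slots not owned by hardware, keeping one slot always empty */
        return (u16)(r->cnt - ((r->tail - r->head) & (r->cnt - 1)) - 1);
    }

    static int submit_txreq(struct my_ring *r, struct my_txreq *tx)
    {
        if (tx->sg_count > ring_free_count(r))
            return -EBUSY;  /* caller parks the request and retries later */

        tx->start_idx = r->tail;
        /* ... write tx->sg_count descriptors here, advancing r->tail ... */
        tx->next_descq_idx = r->tail;
        return 0;
    }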
qib_verbs.c
  959   tx = list_entry(l, struct qib_verbs_txreq, txreq.list);  in __get_txreq()
  988   tx = list_entry(l, struct qib_verbs_txreq, txreq.list);  in get_txreq()
  1012  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {  in qib_put_txreq()
  1013  tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;  in qib_put_txreq()
  1015  tx->txreq.addr, tx->hdr_dwords << 2,  in qib_put_txreq()
  1023  list_add(&tx->txreq.list, &dev->txreq_free);  in qib_put_txreq()
  1068  if (qp->s_tx->txreq.sg_count > avail)  in qib_verbs_sdma_desc_avail()
  1070  avail -= qp->s_tx->txreq.sg_count;  in qib_verbs_sdma_desc_avail()
  1097  container_of(cookie, struct qib_verbs_txreq, txreq);  in sdma_complete()
  1106  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)  in sdma_complete()
  [all …]
qib.h
  260   struct qib_sdma_txreq txreq;  member
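The single qib.h hit (line 260) explains most of the matches above: the generic struct qib_sdma_txreq is embedded as a member of the driver's struct qib_verbs_txreq, so the SDMA core only ever sees the embedded part, and the verbs layer recovers its wrapper with list_entry()/container_of(), as in __get_txreq()/get_txreq() (lines 959, 988) and sdma_complete() (line 1097). A minimal sketch of that embed-and-recover idiom, with illustrative type names rather than the qib definitions:

    #include <linux/kernel.h>
    #include <linux/list.h>

    /* Generic request the common SDMA layer manipulates. */
    struct sdma_txreq_like  { struct list_head list; };

    /* Driver wrapper: the generic request is embedded, not pointed to. */
    struct verbs_txreq_like { struct sdma_txreq_like txreq; };

    /*
     * Pop one wrapper off a free list threaded through the *embedded*
     * member's list_head, the way __get_txreq() walks dev->txreq_free.
     */
    static struct verbs_txreq_like *pop_txreq(struct list_head *txreq_free)
    {
        struct list_head *l;

        if (list_empty(txreq_free))
            return NULL;

        l = txreq_free->next;
        list_del(l);
        return list_entry(l, struct verbs_txreq_like, txreq.list);
    }

Embedding the generic request means one allocation serves both layers and completions need no lookup table; the cost is that the wrapper layout must keep txreq recoverable by offset, which is exactly what container_of() encodes.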
/linux-4.4.14/drivers/net/xen-netback/
netback.c
  1295  struct xen_netif_tx_request txreq;  in xenvif_tx_build_gops()  local
  1321  RING_COPY_REQUEST(&queue->tx, idx, &txreq);  in xenvif_tx_build_gops()
  1324  if (txreq.size > queue->remaining_credit &&  in xenvif_tx_build_gops()
  1325  tx_credit_exceeded(queue, txreq.size))  in xenvif_tx_build_gops()
  1328  queue->remaining_credit -= txreq.size;  in xenvif_tx_build_gops()
  1334  if (txreq.flags & XEN_NETTXF_extra_info) {  in xenvif_tx_build_gops()
  1348  make_tx_response(queue, &txreq,  in xenvif_tx_build_gops()
  1362  make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);  in xenvif_tx_build_gops()
  1367  ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);  in xenvif_tx_build_gops()
  1373  if (unlikely(txreq.size < ETH_HLEN)) {  in xenvif_tx_build_gops()
  [all …]
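Two details stand out in the xenvif_tx_build_gops() hits: the request is copied off the shared ring into a local struct xen_netif_tx_request with RING_COPY_REQUEST() (line 1321), so later validation such as the ETH_HLEN check (line 1373) runs against a snapshot the guest can no longer rewrite, and txreq.size is charged against a per-queue credit budget before the packet is accepted (lines 1324-1328). A rough sketch of the credit step with a simplified, hypothetical type; the real tx_credit_exceeded() also handles replenishing the budget over time, which is omitted here.

    #include <linux/types.h>

    /* Hedged sketch (hypothetical type, not the xen-netback API):
     * charge each transmit request against a per-queue byte budget. */
    struct fake_queue {
        u64 remaining_credit;  /* bytes the guest may still send this period */
    };

    /* Returns true when the request must wait for more credit. */
    static bool charge_credit(struct fake_queue *q, unsigned int size)
    {
        if (size > q->remaining_credit)
            return true;   /* over budget: defer, do not consume the request */

        q->remaining_credit -= size;
        return false;
    }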
/linux-4.4.14/drivers/staging/rdma/ipath/
ipath_sdma.c
  693   if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {  in ipath_sdma_verbs_send()
  700   addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,  in ipath_sdma_verbs_send()
  710   if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)  in ipath_sdma_verbs_send()
  719   if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)  in ipath_sdma_verbs_send()
  720   tx->txreq.start_idx = tail;  in ipath_sdma_verbs_send()
  747   if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)  in ipath_sdma_verbs_send()
  786   if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {  in ipath_sdma_verbs_send()
  795   tx->txreq.next_descq_idx = tail;  in ipath_sdma_verbs_send()
  796   tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;  in ipath_sdma_verbs_send()
  798   dd->ipath_sdma_descq_added += tx->txreq.sg_count;  in ipath_sdma_verbs_send()
  [all …]
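ipath_sdma_verbs_send() performs the same free-slot check as qib (line 693), but it also maps the request's copy buffer itself: txreq.map_addr is handed to dma_map_single() (line 700) and the returned bus address is what the descriptors reference. A small sketch of that mapping step, assuming the device pointer and buffer are already in hand; the dma_mapping_error() check shows how such code is normally written, not necessarily verbatim what the driver does.

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /*
     * Hedged sketch: map a CPU buffer for device reads before an SDMA
     * descriptor references it.  'dev' is the device doing DMA,
     * e.g. &pcidev->dev.  Pair with dma_unmap_single() on completion.
     */
    static int map_for_sdma(struct device *dev, void *buf, size_t len,
                            dma_addr_t *bus_addr)
    {
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, addr))
            return -ENOMEM;  /* never post a descriptor for an unmapped buffer */

        *bus_addr = addr;    /* this value goes into the descriptor */
        return 0;
    }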
ipath_verbs.c
  1025  tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);  in get_txreq()
  1037  list_add(&tx->txreq.list, &dev->txreq_free);  in put_txreq()
  1066  if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)  in sdma_complete()
  1067  kfree(tx->txreq.map_addr);  in sdma_complete()
  1153  tx->txreq.callback = sdma_complete;  in ipath_verbs_send_dma()
  1154  tx->txreq.callback_cookie = tx;  in ipath_verbs_send_dma()
  1155  tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |  in ipath_verbs_send_dma()
  1158  tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;  in ipath_verbs_send_dma()
  1163  tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;  in ipath_verbs_send_dma()
  1180  tx->txreq.sg_count = ndesc;  in ipath_verbs_send_dma()
  [all …]
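The ipath_verbs.c matches show both ends of a request's life: before posting, ipath_verbs_send_dma() installs sdma_complete() as txreq.callback and sets flag bits such as IPATH_SDMA_TXREQ_F_USELARGEBUF (lines 1153-1163); on completion, sdma_complete() frees the copy buffer only when the FREEBUF flag is set (lines 1066-1067), and put_txreq() returns the wrapper to dev->txreq_free (line 1037). A condensed sketch of that flag-driven teardown, again with illustrative names and flag values:

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    #define MY_TXREQ_F_FREEBUF 0x1  /* illustrative flag, not the ipath value */

    struct my_sdma_txreq {
        struct list_head list;
        unsigned int flags;
        void *map_addr;              /* copy buffer, if one was allocated */
    };

    struct my_verbs_txreq {
        struct my_sdma_txreq txreq;  /* embedded, as in ipath_verbs.h line 658 */
    };

    static void my_sdma_complete(struct my_sdma_txreq *cookie,
                                 struct list_head *txreq_free)
    {
        struct my_verbs_txreq *tx =
            container_of(cookie, struct my_verbs_txreq, txreq);

        /* Free the copy buffer only if this request owned one. */
        if (tx->txreq.flags & MY_TXREQ_F_FREEBUF)
            kfree(tx->txreq.map_addr);

        /* Hand the wrapper back to the free list for reuse. */
        list_add(&tx->txreq.list, txreq_free);
    }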
ipath_qp.c
  988   if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)  in ipath_destroy_qp()
  989   kfree(qp->s_tx->txreq.map_addr);  in ipath_destroy_qp()
  991   list_add(&qp->s_tx->txreq.list, &dev->txreq_free);  in ipath_destroy_qp()
ipath_verbs.h
  658   struct ipath_sdma_txreq txreq;  member
/linux-4.4.14/drivers/staging/rdma/hfi1/
user_sdma.c
  262   struct sdma_txreq txreq;  member
  310   struct sdma_txreq *txreq,  in defer_packet_queue()  argument
  317   container_of(txreq, struct user_sdma_txreq, txreq);  in defer_packet_queue()
  319   if (sdma_progress(sde, seq, txreq)) {  in defer_packet_queue()
  884   ret = sdma_txinit_ahg(&tx->txreq,  in user_sdma_send_pkts()
  891   ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,  in user_sdma_send_pkts()
  903   sdma_txinit_ahg(&tx->txreq,  in user_sdma_send_pkts()
  910   ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +  in user_sdma_send_pkts()
  942   ret = sdma_txadd_page(pq->dd, &tx->txreq,  in user_sdma_send_pkts()
  1003  list_add_tail(&tx->txreq.list, &req->txps);  in user_sdma_send_pkts()
  [all …]
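In hfi1 the same embedding appears one level up: struct user_sdma_txreq wraps the generic struct sdma_txreq (line 262), defer_packet_queue() climbs back to the wrapper with container_of() (line 317), and user_sdma_send_pkts() builds each packet by initializing the txreq (sdma_txinit()/sdma_txinit_ahg()), appending fragments from kernel memory and pinned user pages (sdma_txadd_kvaddr(), sdma_txadd_page()), then chaining the result onto req->txps (line 1003). The sketch below mirrors only that init / add-fragments / queue shape; every helper name in it is made up, since the real hfi1 prototypes are not reproduced in this listing.

    #include <linux/list.h>
    #include <linux/mm.h>

    struct fake_txreq {
        struct list_head list;    /* linkage onto the per-request packet list */
    };

    struct fake_user_txreq {
        struct fake_txreq txreq;  /* generic part embedded, as at user_sdma.c:262 */
    };

    /* Stubs so the sketch is self-contained; the real hfi1 calls do the work. */
    static int fake_txinit(struct fake_txreq *t, size_t total)            { return 0; }
    static int fake_txadd_kvaddr(struct fake_txreq *t, void *p, size_t n) { return 0; }
    static int fake_txadd_page(struct fake_txreq *t, struct page *pg,
                               unsigned int off, unsigned int len)        { return 0; }

    static int build_one_packet(struct fake_user_txreq *tx,
                                void *hdr, size_t hdrlen,
                                struct page *pg, unsigned int off,
                                unsigned int len, struct list_head *txps)
    {
        int ret;

        ret = fake_txinit(&tx->txreq, hdrlen + len);        /* reserve space */
        if (ret)
            return ret;

        ret = fake_txadd_kvaddr(&tx->txreq, hdr, hdrlen);   /* kernel header */
        if (ret)
            return ret;

        ret = fake_txadd_page(&tx->txreq, pg, off, len);    /* user payload page */
        if (ret)
            return ret;

        list_add_tail(&tx->txreq.list, txps);               /* queue for the engine */
        return 0;
    }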
verbs.c
  792   sdma_txclean(dd_from_dev(dev), &tx->txreq);  in hfi1_put_txreq()
  826   container_of(cookie, struct verbs_txreq, txreq);  in verbs_sdma_complete()
  906   &tx->txreq,  in build_verbs_ulp_payload()
  948   &tx->txreq,  in build_verbs_tx_desc()
  963   &tx->txreq,  in build_verbs_tx_desc()
  983   &tx->txreq,  in build_verbs_tx_desc()
  1022  tx = container_of(stx, struct verbs_txreq, txreq);  in hfi1_verbs_send_dma()
  1057  ret = sdma_send_txreq(sde, &qp->s_iowait, &tx->txreq);  in hfi1_verbs_send_dma()
sdma.h
  381   struct sdma_txreq txreq;  member
qp.c
  555   container_of(tx, struct verbs_txreq, txreq));  in flush_tx_list()
  1406  struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);  in iowait_sleep()