txreq             160 drivers/infiniband/hw/hfi1/qp.c 			container_of(tx, struct verbs_txreq, txreq));
txreq             495 drivers/infiniband/hw/hfi1/qp.c 	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
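
The two qp.c hits above are a completion path mapping a pointer to the embedded sdma_txreq back to its enclosing verbs_txreq. A minimal userspace sketch of that container_of idiom, with stand-in struct layouts (hdr_dwords and the field set here are illustrative, not the real hfi1 definitions):

/* Sketch only: container_of as the kernel defines it (simplified),
 * applied to stand-in structs. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct sdma_txreq { unsigned num_desc; unsigned flags; };
struct verbs_txreq { struct sdma_txreq txreq; unsigned hdr_dwords; };

int main(void)
{
        struct verbs_txreq vtx = { .hdr_dwords = 7 };
        /* A completion callback receives only the embedded member... */
        struct sdma_txreq *stx = &vtx.txreq;
        /* ...and recovers the wrapper, whatever the member's offset. */
        struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
        printf("hdr_dwords = %u\n", tx->hdr_dwords); /* prints 7 */
        return 0;
}

The same pattern recurs throughout this listing: hfi1, qib and the vnic code all embed a generic txreq inside a driver-specific wrapper and convert back on completion.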
txreq             391 drivers/infiniband/hw/hfi1/rc.c 		ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
txreq            5305 drivers/infiniband/hw/hfi1/tid_rdma.c 	ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP;
txreq              80 drivers/infiniband/hw/hfi1/user_sdma.c static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
txreq             104 drivers/infiniband/hw/hfi1/user_sdma.c 	struct sdma_txreq *txreq,
txreq             127 drivers/infiniband/hw/hfi1/user_sdma.c 	struct sdma_txreq *txreq,
txreq             135 drivers/infiniband/hw/hfi1/user_sdma.c 	if (sdma_progress(sde, seq, txreq))
txreq             725 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,
txreq             730 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
txreq             732 drivers/infiniband/hw/hfi1/user_sdma.c 		sdma_txclean(pq->dd, &tx->txreq);
txreq             756 drivers/infiniband/hw/hfi1/user_sdma.c 	ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
txreq             888 drivers/infiniband/hw/hfi1/user_sdma.c 			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
txreq             924 drivers/infiniband/hw/hfi1/user_sdma.c 		list_add_tail(&tx->txreq.list, &req->txps);
txreq             951 drivers/infiniband/hw/hfi1/user_sdma.c 	sdma_txclean(pq->dd, &tx->txreq);
txreq            1267 drivers/infiniband/hw/hfi1/user_sdma.c 	return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
txreq            1378 drivers/infiniband/hw/hfi1/user_sdma.c 	sdma_txinit_ahg(&tx->txreq,
txreq            1397 drivers/infiniband/hw/hfi1/user_sdma.c static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
txreq            1400 drivers/infiniband/hw/hfi1/user_sdma.c 		container_of(txreq, struct user_sdma_txreq, txreq);
txreq            1447 drivers/infiniband/hw/hfi1/user_sdma.c 				container_of(t, struct user_sdma_txreq, txreq);
txreq             238 drivers/infiniband/hw/hfi1/user_sdma.h 	struct sdma_txreq txreq;
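
The user_sdma.c hits queue each request through the list node embedded in its txreq (list_add_tail at line 924 above) and later recover the wrapper with container_of/list_entry (lines 1400 and 1447). A self-contained sketch of that intrusive-list queueing, using pared-down versions of the kernel's list helpers and an invented seqnum field:

/* Sketch only: intrusive-list queueing via the embedded txreq.list. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

struct sdma_txreq { struct list_head list; };
struct user_sdma_txreq { struct sdma_txreq txreq; unsigned seqnum; };

int main(void)
{
        struct list_head txps = LIST_HEAD_INIT(txps);
        struct user_sdma_txreq a = { .seqnum = 1 }, b = { .seqnum = 2 };

        /* Queue through the embedded node, as at user_sdma.c:924. */
        list_add_tail(&a.txreq.list, &txps);
        list_add_tail(&b.txreq.list, &txps);

        /* Walk and recover each wrapper, as at user_sdma.c:1447. */
        for (struct list_head *p = txps.next; p != &txps; p = p->next) {
                struct user_sdma_txreq *tx =
                        list_entry(p, struct user_sdma_txreq, txreq.list);
                printf("seqnum %u\n", tx->seqnum);
        }
        return 0;
}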
txreq             631 drivers/infiniband/hw/hfi1/verbs.c 		container_of(cookie, struct verbs_txreq, txreq);
txreq             678 drivers/infiniband/hw/hfi1/verbs.c 		list_add_tail(&ps->s_txreq->txreq.list,
txreq             712 drivers/infiniband/hw/hfi1/verbs.c 			&tx->txreq,
txreq             781 drivers/infiniband/hw/hfi1/verbs.c 			&tx->txreq,
txreq             795 drivers/infiniband/hw/hfi1/verbs.c 			&tx->txreq,
txreq             802 drivers/infiniband/hw/hfi1/verbs.c 			&tx->txreq,
txreq             822 drivers/infiniband/hw/hfi1/verbs.c 		ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
txreq             864 drivers/infiniband/hw/hfi1/verbs.c 	if (!sdma_txreq_built(&tx->txreq)) {
txreq             893 drivers/infiniband/hw/hfi1/verbs.c 	ret =  sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);
txreq             941 drivers/infiniband/hw/hfi1/verbs.c 		list_add_tail(&ps->s_txreq->txreq.list,
txreq            1237 drivers/infiniband/hw/hfi1/verbs.c 		    !sdma_txreq_built(&tx->txreq))
txreq              69 drivers/infiniband/hw/hfi1/verbs_txreq.c 	sdma_txclean(dd_from_dev(dev), &tx->txreq);
txreq              60 drivers/infiniband/hw/hfi1/verbs_txreq.h 	struct sdma_txreq       txreq;
txreq              95 drivers/infiniband/hw/hfi1/verbs_txreq.h 	tx->txreq.num_desc = 0;
txreq              98 drivers/infiniband/hw/hfi1/verbs_txreq.h 	tx->txreq.flags = 0;
txreq             104 drivers/infiniband/hw/hfi1/verbs_txreq.h 	return &tx->txreq;
txreq             113 drivers/infiniband/hw/hfi1/verbs_txreq.h 		return container_of(stx, struct verbs_txreq, txreq);
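
The verbs_txreq.h hits at lines 95-104 show the reuse discipline: before a cached verbs_txreq is handed out, the embedded sdma_txreq's bookkeeping is reset so stale descriptor state cannot leak into the next send, and the caller gets the embedded txreq pointer. A sketch of just that reset, with illustrative struct layouts:

/* Sketch only: reset-before-reuse, mirroring verbs_txreq.h:95-104. */
#include <stdio.h>

struct sdma_txreq { unsigned num_desc; unsigned flags; };
struct verbs_txreq { struct sdma_txreq txreq; };

static struct sdma_txreq *get_txreq(struct verbs_txreq *tx)
{
        tx->txreq.num_desc = 0;   /* verbs_txreq.h:95 */
        tx->txreq.flags = 0;      /* verbs_txreq.h:98 */
        return &tx->txreq;        /* verbs_txreq.h:104 */
}

int main(void)
{
        struct verbs_txreq tx = { .txreq = { .num_desc = 3, .flags = 0x4 } };
        struct sdma_txreq *stx = get_txreq(&tx);
        printf("%u %u\n", stx->num_desc, stx->flags); /* prints: 0 0 */
        return 0;
}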
txreq              71 drivers/infiniband/hw/hfi1/vnic_sdma.c 	struct sdma_txreq       txreq;
txreq              80 drivers/infiniband/hw/hfi1/vnic_sdma.c static void vnic_sdma_complete(struct sdma_txreq *txreq,
txreq              83 drivers/infiniband/hw/hfi1/vnic_sdma.c 	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
txreq              86 drivers/infiniband/hw/hfi1/vnic_sdma.c 	sdma_txclean(vnic_sdma->dd, txreq);
txreq              98 drivers/infiniband/hw/hfi1/vnic_sdma.c 		&tx->txreq,
txreq             109 drivers/infiniband/hw/hfi1/vnic_sdma.c 				      &tx->txreq,
txreq             118 drivers/infiniband/hw/hfi1/vnic_sdma.c 		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
txreq             134 drivers/infiniband/hw/hfi1/vnic_sdma.c 		&tx->txreq,
txreq             149 drivers/infiniband/hw/hfi1/vnic_sdma.c 		&tx->txreq,
txreq             197 drivers/infiniband/hw/hfi1/vnic_sdma.c 			      &tx->txreq, vnic_sdma->pkts_sent);
txreq             209 drivers/infiniband/hw/hfi1/vnic_sdma.c 	sdma_txclean(dd, &tx->txreq);
txreq             229 drivers/infiniband/hw/hfi1/vnic_sdma.c 				struct sdma_txreq *txreq,
txreq             237 drivers/infiniband/hw/hfi1/vnic_sdma.c 	if (sdma_progress(sde, seq, txreq)) {
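
Both sdma_progress() calls above (user_sdma.c:135 and vnic_sdma.c:237) sit in the sleep callback taken when the descriptor ring looked full: a sequence number captured at that moment is compared against the engine's current one, and if they differ, descriptors were retired in the meantime and the caller retries the post instead of blocking. A standalone sketch of that retry test; the engine struct and field are stand-ins for the real sdma_engine/sdma_progress, which uses a seqlock:

/* Sketch only: retry-instead-of-sleep when the engine made progress. */
#include <stdbool.h>
#include <stdio.h>

struct engine { unsigned seq; };  /* bumped whenever descs are retired */

static bool progress_made(const struct engine *e, unsigned seen)
{
        return e->seq != seen;    /* stale snapshot => retry, don't sleep */
}

int main(void)
{
        struct engine e = { .seq = 10 };
        unsigned seen = e.seq;    /* snapshot taken when ring was full */
        printf("%d\n", progress_made(&e, seen)); /* 0: truly full, sleep */
        e.seq++;                  /* a completion freed descriptors */
        printf("%d\n", progress_made(&e, seen)); /* 1: retry the post */
        return 0;
}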
txreq             248 drivers/infiniband/hw/qib/qib.h 	struct qib_sdma_txreq   txreq;
txreq             496 drivers/infiniband/hw/qib/qib_sdma.c 	tx->txreq.start_idx = 0;
txreq             497 drivers/infiniband/hw/qib/qib_sdma.c 	tx->txreq.next_descq_idx = 0;
txreq             498 drivers/infiniband/hw/qib/qib_sdma.c 	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
txreq             534 drivers/infiniband/hw/qib/qib_sdma.c 	if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
txreq             544 drivers/infiniband/hw/qib/qib_sdma.c 	make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
txreq             547 drivers/infiniband/hw/qib/qib_sdma.c 	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
txreq             563 drivers/infiniband/hw/qib/qib_sdma.c 	tx->txreq.start_idx = tail;
txreq             580 drivers/infiniband/hw/qib/qib_sdma.c 		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
txreq             601 drivers/infiniband/hw/qib/qib_sdma.c 	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
txreq             603 drivers/infiniband/hw/qib/qib_sdma.c 	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
txreq             607 drivers/infiniband/hw/qib/qib_sdma.c 	tx->txreq.next_descq_idx = tail;
txreq             609 drivers/infiniband/hw/qib/qib_sdma.c 	ppd->sdma_descq_added += tx->txreq.sg_count;
txreq             610 drivers/infiniband/hw/qib/qib_sdma.c 	list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
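
The qib_sdma.c hits show the producer-side ring accounting: the sg_count of a txreq is checked against the free descriptor count before posting (line 534 above) and added to sdma_descq_added once queued (line 609). A standalone illustration of that scheme, assuming qib's style of free-running added/removed counters with one slot reserved so head and tail stay distinguishable (the struct and helper names here are mine, not the driver's):

/* Sketch only: free-count check before posting a multi-descriptor tx. */
#include <stdbool.h>
#include <stdio.h>

struct ring {
        unsigned cnt;      /* total descriptors in the queue */
        unsigned added;    /* descriptors ever posted */
        unsigned removed;  /* descriptors ever completed */
};

static unsigned descq_freecnt(const struct ring *r)
{
        return r->cnt - (r->added - r->removed) - 1;
}

static bool post(struct ring *r, unsigned sg_count)
{
        if (sg_count > descq_freecnt(r))
                return false;   /* caller queues the tx and waits */
        r->added += sg_count;   /* as at qib_sdma.c:609 */
        return true;
}

int main(void)
{
        struct ring r = { .cnt = 8 };
        printf("%d %d\n", post(&r, 5), post(&r, 5)); /* 1 0: 2nd waits */
        return 0;
}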
txreq             576 drivers/infiniband/hw/qib/qib_verbs.c 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
txreq             605 drivers/infiniband/hw/qib/qib_verbs.c 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
txreq             628 drivers/infiniband/hw/qib/qib_verbs.c 	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
txreq             629 drivers/infiniband/hw/qib/qib_verbs.c 		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
txreq             631 drivers/infiniband/hw/qib/qib_verbs.c 				 tx->txreq.addr, tx->hdr_dwords << 2,
txreq             639 drivers/infiniband/hw/qib/qib_verbs.c 	list_add(&tx->txreq.list, &dev->txreq_free);
txreq             687 drivers/infiniband/hw/qib/qib_verbs.c 		if (qpp->s_tx->txreq.sg_count > avail)
txreq             689 drivers/infiniband/hw/qib/qib_verbs.c 		avail -= qpp->s_tx->txreq.sg_count;
txreq             715 drivers/infiniband/hw/qib/qib_verbs.c 		container_of(cookie, struct qib_verbs_txreq, txreq);
txreq             725 drivers/infiniband/hw/qib/qib_verbs.c 		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
txreq             805 drivers/infiniband/hw/qib/qib_verbs.c 	tx->txreq.callback = sdma_complete;
txreq             807 drivers/infiniband/hw/qib/qib_verbs.c 		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
txreq             809 drivers/infiniband/hw/qib/qib_verbs.c 		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
txreq             811 drivers/infiniband/hw/qib/qib_verbs.c 		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
txreq             828 drivers/infiniband/hw/qib/qib_verbs.c 		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
txreq             829 drivers/infiniband/hw/qib/qib_verbs.c 		tx->txreq.sg_count = ndesc;
txreq             830 drivers/infiniband/hw/qib/qib_verbs.c 		tx->txreq.addr = dev->pio_hdrs_phys +
txreq             847 drivers/infiniband/hw/qib/qib_verbs.c 	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
txreq             849 drivers/infiniband/hw/qib/qib_verbs.c 	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
txreq             852 drivers/infiniband/hw/qib/qib_verbs.c 	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
txreq             853 drivers/infiniband/hw/qib/qib_verbs.c 	tx->txreq.sg_count = 1;
txreq            1540 drivers/infiniband/hw/qib/qib_verbs.c 		list_add(&tx->txreq.list, &dev->txreq_free);
txreq            1633 drivers/infiniband/hw/qib/qib_verbs.c 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
txreq            1669 drivers/infiniband/hw/qib/qib_verbs.c 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
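
The qib_verbs.c hits also trace the QIB_SDMA_TXREQ_F_FREEBUF lifecycle: the flag is set once the header buffer has been DMA-mapped (line 852 above) and is tested and cleared on completion (lines 628-629) before the unmap, so the buffer is released exactly once regardless of which retirement path runs first. A sketch of that consume-once flag handling; the flag value and unmap hook are illustrative:

/* Sketch only: set-once/consume-once flag gating a cleanup action. */
#include <stdio.h>

#define F_FREEBUF 0x1  /* stand-in for QIB_SDMA_TXREQ_F_FREEBUF */

struct qib_sdma_txreq { unsigned flags; };

static void unmap_header(void) { puts("dma_unmap_single(...)"); }

static void retire(struct qib_sdma_txreq *tx)
{
        if (tx->flags & F_FREEBUF) {
                tx->flags &= ~F_FREEBUF;  /* clear first: retire once */
                unmap_header();
        }
}

int main(void)
{
        struct qib_sdma_txreq tx = { .flags = F_FREEBUF };
        retire(&tx);  /* unmaps */
        retire(&tx);  /* no-op: flag already consumed */
        return 0;
}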
txreq             799 drivers/net/xen-netback/netback.c 		struct xen_netif_tx_request txreq;
txreq             826 drivers/net/xen-netback/netback.c 		RING_COPY_REQUEST(&queue->tx, idx, &txreq);
txreq             829 drivers/net/xen-netback/netback.c 		if (txreq.size > queue->remaining_credit &&
txreq             830 drivers/net/xen-netback/netback.c 		    tx_credit_exceeded(queue, txreq.size))
txreq             833 drivers/net/xen-netback/netback.c 		queue->remaining_credit -= txreq.size;
txreq             840 drivers/net/xen-netback/netback.c 		if (txreq.flags & XEN_NETTXF_extra_info) {
txreq             855 drivers/net/xen-netback/netback.c 			make_tx_response(queue, &txreq, extra_count,
txreq             869 drivers/net/xen-netback/netback.c 			make_tx_response(queue, &txreq, extra_count,
txreq             875 drivers/net/xen-netback/netback.c 		ret = xenvif_count_requests(queue, &txreq, extra_count,
txreq             882 drivers/net/xen-netback/netback.c 		if (unlikely(txreq.size < ETH_HLEN)) {
txreq             884 drivers/net/xen-netback/netback.c 				   "Bad packet size: %d\n", txreq.size);
txreq             885 drivers/net/xen-netback/netback.c 			xenvif_tx_err(queue, &txreq, extra_count, idx);
txreq             890 drivers/net/xen-netback/netback.c 		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
txreq             893 drivers/net/xen-netback/netback.c 				   txreq.offset, txreq.size,
txreq             894 drivers/net/xen-netback/netback.c 				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
txreq             902 drivers/net/xen-netback/netback.c 		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
txreq             904 drivers/net/xen-netback/netback.c 			XEN_NETBACK_TX_COPY_LEN : txreq.size;
txreq             910 drivers/net/xen-netback/netback.c 			xenvif_tx_err(queue, &txreq, extra_count, idx);
txreq             915 drivers/net/xen-netback/netback.c 		if (data_len < txreq.size)
txreq             930 drivers/net/xen-netback/netback.c 				xenvif_tx_err(queue, &txreq, extra_count, idx);
txreq             981 drivers/net/xen-netback/netback.c 		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
txreq             983 drivers/net/xen-netback/netback.c 		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
txreq             996 drivers/net/xen-netback/netback.c 		if (data_len < txreq.size) {
txreq             999 drivers/net/xen-netback/netback.c 			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
txreq            1006 drivers/net/xen-netback/netback.c 			       &txreq, sizeof(txreq));
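
The xen-netback hits walk one tx request through validation: the request is copied out of the shared ring (line 826 above), rejected if smaller than an Ethernet header (line 882) or if it crosses a page boundary (line 890), and then split into a copied linear part and a mapped remainder (line 902). A standalone restatement of those checks; the struct follows Xen's public netif ABI but should be treated as illustrative here, the copy-length threshold is an assumed value, and the real data_len condition also considers the slot count, which is omitted:

/* Sketch only: per-request sanity checks, after netback.c:882/:890. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN                14
#define XEN_PAGE_SIZE           4096
#define XEN_NETBACK_TX_COPY_LEN 128   /* illustrative threshold */

struct xen_netif_tx_request {
        uint32_t gref;
        uint16_t offset;
        uint16_t flags;
        uint16_t id;
        uint16_t size;
};

/* Returns false for a request the backend would answer with tx_err. */
static bool txreq_sane(const struct xen_netif_tx_request *txreq)
{
        if (txreq->size < ETH_HLEN)                         /* :882 */
                return false;
        if ((uint32_t)txreq->offset + txreq->size > XEN_PAGE_SIZE) /* :890 */
                return false;
        return true;
}

int main(void)
{
        struct xen_netif_tx_request ok  = { .offset = 0,    .size = 60 };
        struct xen_netif_tx_request bad = { .offset = 4090, .size = 60 };

        printf("%d %d\n", txreq_sane(&ok), txreq_sane(&bad)); /* 1 0 */

        /* Linear part copied into the skb head, rest mapped (:902). */
        uint16_t data_len = ok.size > XEN_NETBACK_TX_COPY_LEN ?
                            XEN_NETBACK_TX_COPY_LEN : ok.size;
        printf("data_len = %u\n", (unsigned)data_len);
        return 0;
}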