
Searched refs: op_sg (Results 1 – 8 of 8) sorted by relevance

/linux-4.4.14/net/rds/
tcp_send.c
125 sg_page(&rm->data.op_sg[sg]), in rds_tcp_xmit()
126 rm->data.op_sg[sg].offset + off, in rds_tcp_xmit()
127 rm->data.op_sg[sg].length - off, in rds_tcp_xmit()
129 rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]), in rds_tcp_xmit()
130 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off, in rds_tcp_xmit()
137 if (off == rm->data.op_sg[sg].length) { in rds_tcp_xmit()
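
The tcp_send.c hits trace the sendpage loop in rds_tcp_xmit(): each entry of rm->data.op_sg is pushed to the socket a chunk at a time, and the cursor only advances once off reaches the entry's length. A minimal sketch of that loop shape, assuming a sendpage-style helper; tcp_sendpage_like(), xmit_data_sketch() and the cursor arguments are illustrative, not the 4.4.14 code:

#include <linux/net.h>
#include <linux/scatterlist.h>

/* Sketch: walk rm->data.op_sg and push each entry with sendpage-style I/O.
 * 'sg' and 'off' mirror the transmit cursor visible in the hits above;
 * tcp_sendpage_like() stands in for the socket sendpage call actually used.
 */
static int xmit_data_sketch(struct socket *sock, struct rds_message *rm,
			    unsigned int sg, unsigned int off)
{
	int ret = 0;

	while (sg < rm->data.op_nents) {
		struct scatterlist *sge = &rm->data.op_sg[sg];

		ret = tcp_sendpage_like(sock,
					sg_page(sge),		/* backing page */
					sge->offset + off,	/* resume mid-entry */
					sge->length - off);	/* bytes left in entry */
		if (ret <= 0)
			break;

		off += ret;
		if (off == sge->length) {	/* entry fully sent: advance */
			off = 0;
			sg++;
		}
	}
	return ret;
}
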
message.c
65 rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i])); in rds_message_purge()
67 __free_page(sg_page(&rm->data.op_sg[i])); in rds_message_purge()
252 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); in rds_message_map_pages()
253 if (!rm->data.op_sg) { in rds_message_map_pages()
259 sg_set_page(&rm->data.op_sg[i], in rds_message_map_pages()
279 sg = rm->data.op_sg; in rds_message_copy_from_user()
323 sg = rm->data.op_sg; in rds_message_inc_copy_to_user()
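
The message.c hits show the allocate-then-populate pattern: rds_message_alloc_sgs() reserves scatterlist entries on the message, then sg_set_page() points each entry at a page. A hedged sketch of that pattern; the pages array, length handling, and the function name map_pages_sketch() are illustrative:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Sketch of the rds_message_map_pages() pattern seen above: reserve
 * scatterlist entries on the message, then point each one at a page.
 * rds_message_alloc_sgs() and the rm->data.op_sg field follow the hits;
 * the 'pages' array and total_len bookkeeping here are illustrative only.
 */
static int map_pages_sketch(struct rds_message *rm, struct page **pages,
			    unsigned int num_sgs, unsigned int total_len)
{
	unsigned int i;

	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
	if (!rm->data.op_sg)
		return -ENOMEM;

	for (i = 0; i < num_sgs; i++) {
		unsigned int len = min_t(unsigned int, total_len, PAGE_SIZE);

		sg_set_page(&rm->data.op_sg[i], pages[i], len, 0);
		total_len -= len;
	}
	return 0;
}
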
ib_send.c
77 op->op_sg, op->op_nents, in rds_ib_send_unmap_data()
87 op->op_sg, op->op_nents, in rds_ib_send_unmap_rdma()
127 ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1, in rds_ib_send_unmap_atomic()
512 scat = &rm->data.op_sg[sg]; in rds_ib_xmit()
551 rm->data.op_sg, in rds_ib_xmit()
621 scat = &ic->i_data_op->op_sg[rm->data.op_dmasg]; in rds_ib_xmit()
642 && scat != &rm->data.op_sg[rm->data.op_count]) { in rds_ib_xmit()
693 && scat != &rm->data.op_sg[rm->data.op_count]); in rds_ib_xmit()
701 if (scat == &rm->data.op_sg[rm->data.op_count]) { in rds_ib_xmit()
801 ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE); in rds_ib_xmit_atomic()
[all …]
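
The ib_send.c hits bracket op_sg with DMA mapping: ib_dma_map_sg() against ic->i_cm_id->device before posting, ib_dma_unmap_sg() on completion, and the xmit path walking scat entries up to op_count. A sketch of that map/unmap pairing, assuming the send path maps with DMA_TO_DEVICE (the atomic hit above maps with DMA_FROM_DEVICE); the function names are illustrative:

#include <rdma/ib_verbs.h>

/* Sketch of the DMA pairing visible in ib_send.c: op_sg is mapped for the
 * HCA before the work request is posted and unmapped when it completes.
 */
static int map_data_op_sketch(struct rds_ib_connection *ic, struct rm_data_op *op)
{
	int count;

	count = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, op->op_nents,
			      DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;		/* nothing mapped */
	op->op_count = count;		/* mapped entries may be fewer than op_nents */
	return 0;
}

static void unmap_data_op_sketch(struct rds_ib_connection *ic, struct rm_data_op *op)
{
	ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, op->op_nents,
			DMA_TO_DEVICE);
}
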
iw_send.c
71 op->op_sg, op->op_nents, in rds_iw_send_unmap_rdma()
86 rm->data.op_sg, rm->data.op_nents, in rds_iw_send_unmap_rm()
559 rm->data.op_sg, in rds_iw_xmit()
619 scat = &rm->data.op_sg[rm->data.op_dmasg]; in rds_iw_xmit()
648 for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) { in rds_iw_xmit()
728 if (scat == &rm->data.op_sg[rm->data.op_count]) { in rds_iw_xmit()
811 op->op_sg, op->op_nents, (op->op_write) ? in rds_iw_xmit_rdma()
855 scat = &op->op_sg[0]; in rds_iw_xmit_rdma()
860 for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) { in rds_iw_xmit_rdma()
898 scat != &op->op_sg[op->op_count]; j++) { in rds_iw_xmit_rdma()
[all …]
rdma.c
449 struct page *page = sg_page(&ro->op_sg[i]); in rds_rdma_free_op()
468 struct page *page = sg_page(ao->op_sg); in rds_atomic_free_op()
610 op->op_sg = rds_message_alloc_sgs(rm, nr_pages); in rds_cmsg_rdma_args()
611 if (!op->op_sg) { in rds_cmsg_rdma_args()
674 sg = &op->op_sg[op->op_nents + j]; in rds_cmsg_rdma_args()
817 rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); in rds_cmsg_atomic()
818 if (!rm->atomic.op_sg) { in rds_cmsg_atomic()
834 sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr)); in rds_cmsg_atomic()
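
The rdma.c atomic hits reserve exactly one scatterlist entry and point it at 8 bytes of a pinned user page at offset_in_page(args->local_addr). A sketch of that setup, assuming the page has already been pinned elsewhere; atomic_sg_sketch() and the argument list are illustrative:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Sketch of the single-entry setup in rds_cmsg_atomic(): an atomic op
 * targets exactly one 8-byte word, so one scatterlist entry is reserved
 * and pointed at the user page holding the operand.
 */
static int atomic_sg_sketch(struct rds_message *rm, struct page *page,
			    u64 local_addr)
{
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (!rm->atomic.op_sg)
		return -ENOMEM;

	/* 8 bytes: the width of the fetch-add / compare-swap operand */
	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(local_addr));
	return 0;
}
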
loop.c
64 struct scatterlist *sgp = &rm->data.op_sg[sg]; in rds_loop_xmit()
rds.h
356 struct scatterlist *op_sg; member
374 struct scatterlist *op_sg; member
385 struct scatterlist *op_sg; member
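
The three "member" hits in rds.h are the scatterlist pointers inside the atomic, RDMA, and data operation structs carried by struct rds_message. An abridged sketch of how they relate, trimmed to the fields the other hits reference; the real 4.4.14 definitions carry more state and different names than these *_sketch placeholders:

#include <linux/scatterlist.h>

/* Abridged sketch of the three op structs in rds.h; only fields that the
 * search hits reference are kept.
 */
struct rm_atomic_op_sketch {
	struct scatterlist *op_sg;	/* single 8-byte operand */
};

struct rm_rdma_op_sketch {
	unsigned int op_nents;		/* entries holding data */
	unsigned int op_count;		/* entries mapped for DMA */
	struct scatterlist *op_sg;
};

struct rm_data_op_sketch {
	unsigned int op_nents;
	unsigned int op_count;
	unsigned int op_dmasg;		/* transmit cursor into op_sg */
	struct scatterlist *op_sg;
};

That all three operation types share the op_sg field name is what makes a single cross-reference search return the data, RDMA, and atomic paths together.
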
send.c
355 sg = &rm->data.op_sg[conn->c_xmit_sg]; in rds_send_xmit()
1042 rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE)); in rds_sendmsg()
1043 if (!rm->data.op_sg) { in rds_sendmsg()
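
The send.c hit sizes the data scatterlist at one entry per page of payload (ceil(payload_len, PAGE_SIZE), where ceil() is RDS's local rounding helper). A sketch of that sizing using the generic DIV_ROUND_UP() equivalent; alloc_data_sgs_sketch() is an illustrative name:

#include <linux/kernel.h>
#include <linux/mm.h>

/* Sketch of the sizing seen in rds_sendmsg(): one scatterlist entry per
 * page of payload, allocated up front on the message.
 */
static int alloc_data_sgs_sketch(struct rds_message *rm, size_t payload_len)
{
	unsigned int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);

	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
	if (!rm->data.op_sg)
		return -ENOMEM;
	return 0;
}
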