Lines matching refs: send (net/rds/iw_send.c)

78 			  struct rds_iw_send_work *send,  in rds_iw_send_unmap_rm()  argument
81 struct rds_message *rm = send->s_rm; in rds_iw_send_unmap_rm()
83 rdsdebug("ic %p send %p rm %p\n", ic, send, rm); in rds_iw_send_unmap_rm()
125 send->s_rm = NULL; in rds_iw_send_unmap_rm()
130 struct rds_iw_send_work *send; in rds_iw_send_init_ring() local
133 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) { in rds_iw_send_init_ring()
136 send->s_rm = NULL; in rds_iw_send_init_ring()
137 send->s_op = NULL; in rds_iw_send_init_ring()
138 send->s_mapping = NULL; in rds_iw_send_init_ring()
140 send->s_wr.next = NULL; in rds_iw_send_init_ring()
141 send->s_wr.wr_id = i; in rds_iw_send_init_ring()
142 send->s_wr.sg_list = send->s_sge; in rds_iw_send_init_ring()
143 send->s_wr.num_sge = 1; in rds_iw_send_init_ring()
144 send->s_wr.opcode = IB_WR_SEND; in rds_iw_send_init_ring()
145 send->s_wr.send_flags = 0; in rds_iw_send_init_ring()
146 send->s_wr.ex.imm_data = 0; in rds_iw_send_init_ring()
148 sge = rds_iw_data_sge(ic, send->s_sge); in rds_iw_send_init_ring()
151 sge = rds_iw_header_sge(ic, send->s_sge); in rds_iw_send_init_ring()
156 send->s_mr = ib_alloc_fast_reg_mr(ic->i_pd, fastreg_message_size); in rds_iw_send_init_ring()
157 if (IS_ERR(send->s_mr)) { in rds_iw_send_init_ring()
162 send->s_page_list = ib_alloc_fast_reg_page_list( in rds_iw_send_init_ring()
164 if (IS_ERR(send->s_page_list)) { in rds_iw_send_init_ring()
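Read together, the rds_iw_send_init_ring() hits above describe a single loop that pre-formats every slot of the send ring. Below is a minimal sketch reconstructed from those fragments, assuming the pre-4.3 fast-registration verbs API; the sge bodies between the matched lines, and the use of ic->i_send_hdrs_dma for the per-slot header, are assumptions rather than lines from the listing.

void rds_iw_send_init_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_send_work *send;
	struct ib_sge *sge;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		/* No message, RDMA op or DMA mapping attached yet. */
		send->s_rm = NULL;
		send->s_op = NULL;
		send->s_mapping = NULL;

		/* Pre-format the work request: header-only send by default. */
		send->s_wr.next = NULL;
		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.num_sge = 1;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.send_flags = 0;
		send->s_wr.ex.imm_data = 0;

		/* Data sge is filled in per transmit (assumption). */
		sge = rds_iw_data_sge(ic, send->s_sge);
		sge->lkey = 0;

		/* Header sge points at this slot's rds_header (assumption). */
		sge = rds_iw_header_sge(ic, send->s_sge);
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = 0;

		/* Per-slot fast-registration MR and page list (old FRMR API). */
		send->s_mr = ib_alloc_fast_reg_mr(ic->i_pd, fastreg_message_size);
		if (IS_ERR(send->s_mr)) {
			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed\n");
			break;
		}

		send->s_page_list = ib_alloc_fast_reg_page_list(
			ic->i_cm_id->device, fastreg_message_size);
		if (IS_ERR(send->s_page_list)) {
			printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed\n");
			break;
		}
	}
}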
173 struct rds_iw_send_work *send; in rds_iw_send_clear_ring() local
176 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) { in rds_iw_send_clear_ring()
177 BUG_ON(!send->s_mr); in rds_iw_send_clear_ring()
178 ib_dereg_mr(send->s_mr); in rds_iw_send_clear_ring()
179 BUG_ON(!send->s_page_list); in rds_iw_send_clear_ring()
180 ib_free_fast_reg_page_list(send->s_page_list); in rds_iw_send_clear_ring()
181 if (send->s_wr.opcode == 0xdead) in rds_iw_send_clear_ring()
183 if (send->s_rm) in rds_iw_send_clear_ring()
184 rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR); in rds_iw_send_clear_ring()
185 if (send->s_op) in rds_iw_send_clear_ring()
186 rds_iw_send_unmap_rdma(ic, send->s_op); in rds_iw_send_clear_ring()
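The rds_iw_send_clear_ring() hits form the matching teardown loop; a near-verbatim reconstruction from the fragments (the 0xdead opcode is the sentinel the completion handler writes into retired slots, compare line 268 below):

void rds_iw_send_clear_ring(struct rds_iw_connection *ic)
{
	struct rds_iw_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		/* Every slot got an MR and a page list in rds_iw_send_init_ring(). */
		BUG_ON(!send->s_mr);
		ib_dereg_mr(send->s_mr);
		BUG_ON(!send->s_page_list);
		ib_free_fast_reg_page_list(send->s_page_list);

		/* 0xdead marks a slot already retired by the completion handler. */
		if (send->s_wr.opcode == 0xdead)
			continue;
		if (send->s_rm)
			rds_iw_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
		if (send->s_op)
			rds_iw_send_unmap_rdma(ic, send->s_op);
	}
}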
201 struct rds_iw_send_work *send; in rds_iw_send_cq_comp_handler() local
246 send = &ic->i_sends[oldest]; in rds_iw_send_cq_comp_handler()
249 switch (send->s_wr.opcode) { in rds_iw_send_cq_comp_handler()
251 if (send->s_rm) in rds_iw_send_cq_comp_handler()
252 rds_iw_send_unmap_rm(ic, send, wc.status); in rds_iw_send_cq_comp_handler()
264 __func__, send->s_wr.opcode); in rds_iw_send_cq_comp_handler()
268 send->s_wr.opcode = 0xdead; in rds_iw_send_cq_comp_handler()
269 send->s_wr.num_sge = 1; in rds_iw_send_cq_comp_handler()
270 if (time_after(jiffies, send->s_queued + HZ/2)) in rds_iw_send_cq_comp_handler()
277 if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) { in rds_iw_send_cq_comp_handler()
280 rm = rds_send_get_message(conn, send->s_op); in rds_iw_send_cq_comp_handler()
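Inside rds_iw_send_cq_comp_handler(), the matched lines are the per-completion bookkeeping for each retired ring slot. A condensed sketch of that inner-loop body follows; the ring arithmetic and statistics are elided, the non-send opcode cases are assumed, and rds_iw_stats_inc(), rds_iw_send_rdma_complete() and rds_message_put() are drawn from the sibling RDS/IB transport rather than from this listing.

	/* Walk the ring from the oldest outstanding slot for each completed wr_id. */
	send = &ic->i_sends[oldest];

	/* Undo whatever the posted work request had mapped. */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_rm)
			rds_iw_send_unmap_rm(ic, send, wc.status);
		break;
	case IB_WR_FAST_REG_MR:
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		/* RDMA ops are torn down via send->s_op, not here. */
		break;
	default:
		printk(KERN_NOTICE "RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
		       __func__, send->s_wr.opcode);
		break;
	}

	/* Retire the slot: poison the opcode and restore the default sge count. */
	send->s_wr.opcode = 0xdead;
	send->s_wr.num_sge = 1;
	if (time_after(jiffies, send->s_queued + HZ / 2))
		rds_iw_stats_inc(s_iw_tx_stalled);

	/* A remote access error on an RDMA op is reported back to the socket. */
	if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
		struct rds_message *rm;

		rm = rds_send_get_message(conn, send->s_op);
		if (rm) {
			rds_iw_send_rdma_complete(rm, wc.status);
			rds_message_put(rm);
		}
	}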
449 struct rds_iw_send_work *send, unsigned int pos, in rds_iw_xmit_populate_wr() argument
455 WARN_ON(pos != send - ic->i_sends); in rds_iw_xmit_populate_wr()
457 send->s_wr.send_flags = send_flags; in rds_iw_xmit_populate_wr()
458 send->s_wr.opcode = IB_WR_SEND; in rds_iw_xmit_populate_wr()
459 send->s_wr.num_sge = 2; in rds_iw_xmit_populate_wr()
460 send->s_wr.next = NULL; in rds_iw_xmit_populate_wr()
461 send->s_queued = jiffies; in rds_iw_xmit_populate_wr()
462 send->s_op = NULL; in rds_iw_xmit_populate_wr()
465 sge = rds_iw_data_sge(ic, send->s_sge); in rds_iw_xmit_populate_wr()
470 sge = rds_iw_header_sge(ic, send->s_sge); in rds_iw_xmit_populate_wr()
474 send->s_wr.num_sge = 1; in rds_iw_xmit_populate_wr()
475 sge = &send->s_sge[0]; in rds_iw_xmit_populate_wr()
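rds_iw_xmit_populate_wr() re-arms one ring slot for a plain IB_WR_SEND. A sketch follows; the trailing parameters (buffer, length) and the header sge addressing are not visible in the listing and are assumptions:

static void rds_iw_xmit_populate_wr(struct rds_iw_connection *ic,
				    struct rds_iw_send_work *send,
				    unsigned int pos,
				    unsigned long buffer, unsigned int length,
				    int send_flags)
{
	struct ib_sge *sge;

	WARN_ON(pos != send - ic->i_sends);

	send->s_wr.send_flags = send_flags;
	send->s_wr.opcode = IB_WR_SEND;
	send->s_wr.num_sge = 2;
	send->s_wr.next = NULL;
	send->s_queued = jiffies;
	send->s_op = NULL;

	if (length != 0) {
		/* Data sge followed by the header sge. */
		sge = rds_iw_data_sge(ic, send->s_sge);
		sge->addr = buffer;
		sge->length = length;
		sge->lkey = rds_iw_local_dma_lkey(ic);

		sge = rds_iw_header_sge(ic, send->s_sge);
	} else {
		/* Header-only packet: a single sge. */
		send->s_wr.num_sge = 1;
		sge = &send->s_sge[0];
	}

	/* Both paths finish by describing this slot's header (assumption). */
	sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
	sge->length = sizeof(struct rds_header);
	sge->lkey = rds_iw_local_dma_lkey(ic);
}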
501 struct rds_iw_send_work *send = NULL; in rds_iw_xmit() local
622 send = &ic->i_sends[pos]; in rds_iw_xmit()
623 first = send; in rds_iw_xmit()
649 rds_iw_xmit_populate_wr(ic, send, pos, 0, 0, send_flags); in rds_iw_xmit()
657 send = &ic->i_sends[pos]; in rds_iw_xmit()
660 rds_iw_xmit_populate_wr(ic, send, pos, in rds_iw_xmit()
671 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; in rds_iw_xmit()
677 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; in rds_iw_xmit()
684 send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; in rds_iw_xmit()
686 rdsdebug("send %p wr %p num_sge %u next %p\n", send, in rds_iw_xmit()
687 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); in rds_iw_xmit()
720 prev->s_wr.next = &send->s_wr; in rds_iw_xmit()
721 prev = send; in rds_iw_xmit()
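All the rds_iw_xmit() hits follow one pattern: each fragment of the message claims the next ring slot, populates it via rds_iw_xmit_populate_wr(), optionally requests a completion, and is chained onto the previous work request so the whole message goes out with a single ib_post_send(). A compressed, partly hypothetical sketch of that loop body; page_addr, len, want_completion and the prev guard are placeholders, not lines from the listing:

	send = &ic->i_sends[pos];
	rds_iw_xmit_populate_wr(ic, send, pos, page_addr, len, send_flags);

	/* Ask for a completion on the last fragment (and periodically in
	 * between) so the send ring can eventually be reclaimed. */
	if (want_completion)
		send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

	rdsdebug("send %p wr %p num_sge %u next %p\n", send,
		 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

	/* Link this work request behind the previous one. */
	if (prev)
		prev->s_wr.next = &send->s_wr;
	prev = send;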
768 …device *rds_iwdev, struct rds_iw_connection *ic, struct rds_iw_send_work *send, int nent, int len,… in rds_iw_build_send_fastreg() argument
770 BUG_ON(nent > send->s_page_list->max_page_list_len); in rds_iw_build_send_fastreg()
776 send->s_wr.opcode = IB_WR_FAST_REG_MR; in rds_iw_build_send_fastreg()
777 send->s_wr.wr.fast_reg.length = len; in rds_iw_build_send_fastreg()
778 send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey; in rds_iw_build_send_fastreg()
779 send->s_wr.wr.fast_reg.page_list = send->s_page_list; in rds_iw_build_send_fastreg()
780 send->s_wr.wr.fast_reg.page_list_len = nent; in rds_iw_build_send_fastreg()
781 send->s_wr.wr.fast_reg.page_shift = PAGE_SHIFT; in rds_iw_build_send_fastreg()
782 send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE; in rds_iw_build_send_fastreg()
783 send->s_wr.wr.fast_reg.iova_start = sg_addr; in rds_iw_build_send_fastreg()
785 ib_update_fast_reg_key(send->s_mr, send->s_remap_count++); in rds_iw_build_send_fastreg()
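rds_iw_build_send_fastreg() turns a ring slot into an IB_WR_FAST_REG_MR work request using the fast_reg union of the pre-4.3 struct ib_send_wr. A reconstruction from the fragments; the first parameter type (the line is truncated above) and the sg_addr name are assumptions:

static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev,
				      struct rds_iw_connection *ic,
				      struct rds_iw_send_work *send,
				      int nent, int len, u64 sg_addr)
{
	BUG_ON(nent > send->s_page_list->max_page_list_len);

	/* Describe the registration: nent pages at sg_addr, remote-writable. */
	send->s_wr.opcode = IB_WR_FAST_REG_MR;
	send->s_wr.wr.fast_reg.length = len;
	send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey;
	send->s_wr.wr.fast_reg.page_list = send->s_page_list;
	send->s_wr.wr.fast_reg.page_list_len = nent;
	send->s_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE;
	send->s_wr.wr.fast_reg.iova_start = sg_addr;

	/* Bump the key so a stale rkey cannot be reused after invalidation. */
	ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
}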
791 struct rds_iw_send_work *send = NULL; in rds_iw_xmit_rdma() local
849 send = &ic->i_sends[pos]; in rds_iw_xmit_rdma()
853 first = send; in rds_iw_xmit_rdma()
861 send->s_wr.send_flags = 0; in rds_iw_xmit_rdma()
862 send->s_queued = jiffies; in rds_iw_xmit_rdma()
870 send->s_wr.send_flags = IB_SEND_SIGNALED; in rds_iw_xmit_rdma()
878 send->s_wr.opcode = IB_WR_RDMA_WRITE; in rds_iw_xmit_rdma()
880 send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV; in rds_iw_xmit_rdma()
882 send->s_wr.wr.rdma.remote_addr = remote_addr; in rds_iw_xmit_rdma()
883 send->s_wr.wr.rdma.rkey = op->op_rkey; in rds_iw_xmit_rdma()
884 send->s_op = op; in rds_iw_xmit_rdma()
887 send->s_wr.num_sge = rds_iwdev->max_sge; in rds_iw_xmit_rdma()
890 send->s_wr.num_sge = num_sge; in rds_iw_xmit_rdma()
892 send->s_wr.next = NULL; in rds_iw_xmit_rdma()
895 prev->s_wr.next = &send->s_wr; in rds_iw_xmit_rdma()
897 for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) { in rds_iw_xmit_rdma()
900 if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) in rds_iw_xmit_rdma()
901 send->s_page_list->page_list[j] = ib_sg_dma_address(ic->i_cm_id->device, scat); in rds_iw_xmit_rdma()
903 send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat); in rds_iw_xmit_rdma()
904 send->s_sge[j].length = len; in rds_iw_xmit_rdma()
905 send->s_sge[j].lkey = rds_iw_local_dma_lkey(ic); in rds_iw_xmit_rdma()
915 if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) { in rds_iw_xmit_rdma()
916 send->s_wr.num_sge = 1; in rds_iw_xmit_rdma()
917 send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr; in rds_iw_xmit_rdma()
918 send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes; in rds_iw_xmit_rdma()
919 send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey; in rds_iw_xmit_rdma()
922 rdsdebug("send %p wr %p num_sge %u next %p\n", send, in rds_iw_xmit_rdma()
923 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); in rds_iw_xmit_rdma()
925 prev = send; in rds_iw_xmit_rdma()
926 if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) in rds_iw_xmit_rdma()
927 send = ic->i_sends; in rds_iw_xmit_rdma()
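Finally, the rds_iw_xmit_rdma() hits show how a single RDMA op is fanned out over consecutive ring slots, wrapping around at the end of the ring. A condensed sketch of the per-work-request body; the loop framing (last_wr, num_sge bookkeeping, remote_addr advance) and the op_write test are inferred, and the surrounding ring allocation and fast-reg posting are elided:

	/* For each work request needed to cover op->op_sg: */
	send->s_wr.send_flags = 0;
	send->s_queued = jiffies;

	/* Only the final work request asks for a completion. */
	if (last_wr)
		send->s_wr.send_flags = IB_SEND_SIGNALED;

	/* Writes go out directly; reads use read-with-invalidate against the
	 * fast-reg MR prepared in ic->i_sends[fr_pos]. */
	if (op->op_write)
		send->s_wr.opcode = IB_WR_RDMA_WRITE;
	else
		send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;

	send->s_wr.wr.rdma.remote_addr = remote_addr;
	send->s_wr.wr.rdma.rkey = op->op_rkey;
	send->s_op = op;

	/* Cap the sge count at the device limit (lines 887/890 above). */
	send->s_wr.num_sge = min_t(unsigned int, num_sge, rds_iwdev->max_sge);
	send->s_wr.next = NULL;

	if (prev)
		prev->s_wr.next = &send->s_wr;

	for (j = 0; j < send->s_wr.num_sge &&
		    scat != &op->op_sg[op->op_count]; j++) {
		unsigned int len = ib_sg_dma_len(ic->i_cm_id->device, scat);

		if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
			send->s_page_list->page_list[j] =
				ib_sg_dma_address(ic->i_cm_id->device, scat);
		else {
			send->s_sge[j].addr =
				ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = rds_iw_local_dma_lkey(ic);
		}

		remote_addr += len;
		scat++;
	}

	/* A read-with-invalidate lands in the user buffer through the
	 * fast-reg MR, so it needs exactly one local sge. */
	if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV) {
		send->s_wr.num_sge = 1;
		send->s_sge[0].addr = conn->c_xmit_rm->m_rs->rs_user_addr;
		send->s_sge[0].length = conn->c_xmit_rm->m_rs->rs_user_bytes;
		send->s_sge[0].lkey = ic->i_sends[fr_pos].s_mr->lkey;
	}

	rdsdebug("send %p wr %p num_sge %u next %p\n", send,
		 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

	prev = send;
	if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
		send = ic->i_sends;	/* wrap to the start of the ring */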