Lines matching refs:conn (references to the conn pointer in net/rds/send.c)

65 void rds_send_reset(struct rds_connection *conn)  in rds_send_reset()  argument
70 if (conn->c_xmit_rm) { in rds_send_reset()
71 rm = conn->c_xmit_rm; in rds_send_reset()
72 conn->c_xmit_rm = NULL; in rds_send_reset()
81 conn->c_xmit_sg = 0; in rds_send_reset()
82 conn->c_xmit_hdr_off = 0; in rds_send_reset()
83 conn->c_xmit_data_off = 0; in rds_send_reset()
84 conn->c_xmit_atomic_sent = 0; in rds_send_reset()
85 conn->c_xmit_rdma_sent = 0; in rds_send_reset()
86 conn->c_xmit_data_sent = 0; in rds_send_reset()
88 conn->c_map_queued = 0; in rds_send_reset()
90 conn->c_unacked_packets = rds_sysctl_max_unacked_packets; in rds_send_reset()
91 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes; in rds_send_reset()
94 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_reset()
95 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_reset()
99 list_splice_init(&conn->c_retrans, &conn->c_send_queue); in rds_send_reset()
100 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_reset()
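
These matches cover all of rds_send_reset(): drop the message caught mid-transmit (c_xmit_rm), zero every transmit cursor, refill the unacked budgets from the sysctl maxima, then mark everything on c_retrans as a retransmission and splice it back onto the head of c_send_queue under c_lock. A minimal userspace model of that pattern, with a pthread mutex standing in for the irq-safe spinlock and a toy singly-linked list for the kernel's list_head:

```c
#include <pthread.h>

struct msg {
	struct msg *next;
	int retransmitted;
};

struct conn {
	pthread_mutex_t lock;		/* stands in for c_lock */
	struct msg *send_queue;
	struct msg *retrans;
	unsigned int xmit_sg, xmit_hdr_off, xmit_data_off;
	int xmit_rdma_sent, xmit_atomic_sent, xmit_data_sent;
	unsigned int unacked_packets, unacked_bytes;
};

void send_reset(struct conn *c, unsigned int max_pkts, unsigned int max_bytes)
{
	struct msg *m, *tail;

	/* rewind the partial-transmit cursors so the next send starts clean */
	c->xmit_sg = c->xmit_hdr_off = c->xmit_data_off = 0;
	c->xmit_rdma_sent = c->xmit_atomic_sent = c->xmit_data_sent = 0;

	/* refill the forced-ACK budgets (the sysctl maxima in the kernel) */
	c->unacked_packets = max_pkts;
	c->unacked_bytes = max_bytes;

	/* mark everything unacked as a retransmission and splice it to
	 * the head of the send queue, all under the queue lock */
	pthread_mutex_lock(&c->lock);
	for (m = c->retrans; m; m = m->next)
		m->retransmitted = 1;
	if (c->retrans) {
		for (tail = c->retrans; tail->next; tail = tail->next)
			;
		tail->next = c->send_queue;
		c->send_queue = c->retrans;
		c->retrans = NULL;
	}
	pthread_mutex_unlock(&c->lock);
}
```
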
103 static int acquire_in_xmit(struct rds_connection *conn) in acquire_in_xmit() argument
105 return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0; in acquire_in_xmit()
108 static void release_in_xmit(struct rds_connection *conn) in release_in_xmit() argument
110 clear_bit(RDS_IN_XMIT, &conn->c_flags); in release_in_xmit()
118 if (waitqueue_active(&conn->c_waitq)) in release_in_xmit()
119 wake_up_all(&conn->c_waitq); in release_in_xmit()
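
acquire_in_xmit()/release_in_xmit() build a trylock out of one flag bit: test_and_set_bit() admits exactly one caller into the transmit path, and release clears the bit before waking anyone sleeping on c_waitq. A C11-atomics sketch of the same idea (the kernel also places a memory barrier between the clear and the waitqueue check, elided here):

```c
#include <stdatomic.h>
#include <stdbool.h>

#define IN_XMIT 1u			/* bit 0, like RDS_IN_XMIT in c_flags */

static atomic_uint c_flags;

static bool acquire_in_xmit(void)
{
	/* only the caller that flips the bit from 0 to 1 gets in,
	 * exactly like test_and_set_bit() == 0 */
	return (atomic_fetch_or(&c_flags, IN_XMIT) & IN_XMIT) == 0;
}

static void release_in_xmit(void)
{
	atomic_fetch_and(&c_flags, ~IN_XMIT);
	/* the kernel then does wake_up_all(&conn->c_waitq) if
	 * waitqueue_active(); a userspace analogue would broadcast a
	 * condition variable here */
}
```
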
136 int rds_send_xmit(struct rds_connection *conn) in rds_send_xmit() argument
157 if (!acquire_in_xmit(conn)) { in rds_send_xmit()
171 conn->c_send_gen++; in rds_send_xmit()
172 send_gen = conn->c_send_gen; in rds_send_xmit()
178 if (!rds_conn_up(conn)) { in rds_send_xmit()
179 release_in_xmit(conn); in rds_send_xmit()
184 if (conn->c_trans->xmit_prepare) in rds_send_xmit()
185 conn->c_trans->xmit_prepare(conn); in rds_send_xmit()
193 rm = conn->c_xmit_rm; in rds_send_xmit()
199 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) { in rds_send_xmit()
200 rm = rds_cong_update_alloc(conn); in rds_send_xmit()
207 conn->c_xmit_rm = rm; in rds_send_xmit()
230 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_xmit()
232 if (!list_empty(&conn->c_send_queue)) { in rds_send_xmit()
233 rm = list_entry(conn->c_send_queue.next, in rds_send_xmit()
242 list_move_tail(&rm->m_conn_item, &conn->c_retrans); in rds_send_xmit()
245 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_xmit()
259 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_xmit()
262 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_xmit()
268 if (conn->c_unacked_packets == 0 || in rds_send_xmit()
269 conn->c_unacked_bytes < len) { in rds_send_xmit()
272 conn->c_unacked_packets = rds_sysctl_max_unacked_packets; in rds_send_xmit()
273 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes; in rds_send_xmit()
276 conn->c_unacked_bytes -= len; in rds_send_xmit()
277 conn->c_unacked_packets--; in rds_send_xmit()
280 conn->c_xmit_rm = rm; in rds_send_xmit()
284 if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) { in rds_send_xmit()
290 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); in rds_send_xmit()
296 conn->c_xmit_rdma_sent = 1; in rds_send_xmit()
300 if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) { in rds_send_xmit()
306 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic); in rds_send_xmit()
312 conn->c_xmit_atomic_sent = 1; in rds_send_xmit()
338 if (rm->data.op_active && !conn->c_xmit_data_sent) { in rds_send_xmit()
340 ret = conn->c_trans->xmit(conn, rm, in rds_send_xmit()
341 conn->c_xmit_hdr_off, in rds_send_xmit()
342 conn->c_xmit_sg, in rds_send_xmit()
343 conn->c_xmit_data_off); in rds_send_xmit()
347 if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) { in rds_send_xmit()
350 conn->c_xmit_hdr_off); in rds_send_xmit()
351 conn->c_xmit_hdr_off += tmp; in rds_send_xmit()
355 sg = &rm->data.op_sg[conn->c_xmit_sg]; in rds_send_xmit()
358 conn->c_xmit_data_off); in rds_send_xmit()
359 conn->c_xmit_data_off += tmp; in rds_send_xmit()
361 if (conn->c_xmit_data_off == sg->length) { in rds_send_xmit()
362 conn->c_xmit_data_off = 0; in rds_send_xmit()
364 conn->c_xmit_sg++; in rds_send_xmit()
366 conn->c_xmit_sg == rm->data.op_nents); in rds_send_xmit()
370 if (conn->c_xmit_hdr_off == sizeof(struct rds_header) && in rds_send_xmit()
371 (conn->c_xmit_sg == rm->data.op_nents)) in rds_send_xmit()
372 conn->c_xmit_data_sent = 1; in rds_send_xmit()
380 if (!rm->data.op_active || conn->c_xmit_data_sent) { in rds_send_xmit()
381 conn->c_xmit_rm = NULL; in rds_send_xmit()
382 conn->c_xmit_sg = 0; in rds_send_xmit()
383 conn->c_xmit_hdr_off = 0; in rds_send_xmit()
384 conn->c_xmit_data_off = 0; in rds_send_xmit()
385 conn->c_xmit_rdma_sent = 0; in rds_send_xmit()
386 conn->c_xmit_atomic_sent = 0; in rds_send_xmit()
387 conn->c_xmit_data_sent = 0; in rds_send_xmit()
394 if (conn->c_trans->xmit_complete) in rds_send_xmit()
395 conn->c_trans->xmit_complete(conn); in rds_send_xmit()
396 release_in_xmit(conn); in rds_send_xmit()
423 if ((test_bit(0, &conn->c_map_queued) || in rds_send_xmit()
424 !list_empty(&conn->c_send_queue)) && in rds_send_xmit()
425 send_gen == conn->c_send_gen) { in rds_send_xmit()
429 queue_delayed_work(rds_wq, &conn->c_send_w, 1); in rds_send_xmit()
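
The send_gen matches at source lines 171-172 and the requeue test at 423-429 work as a pair: c_send_gen is bumped while RDS_IN_XMIT is held, and after release_in_xmit() the function reschedules itself only if work remains and no newer sender has bumped the generation. That closes the window where a message queued between the final dequeue and the bit clear would otherwise strand on the queue. A self-contained sketch of the tail check, where requeue_send_worker() is a hypothetical stand-in for queue_delayed_work(rds_wq, &conn->c_send_w, 1):

```c
#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong c_send_gen;
static atomic_bool send_queue_nonempty;	/* models the c_send_queue check */

/* hypothetical stand-in for queue_delayed_work(rds_wq, &conn->c_send_w, 1) */
static void requeue_send_worker(void)
{
}

void send_xmit_tail(unsigned long send_gen_snapshot)
{
	/* by this point release_in_xmit() has dropped the bit, so a
	 * message queued after our last dequeue would otherwise strand */
	if (atomic_load(&send_queue_nonempty) &&
	    send_gen_snapshot == atomic_load(&c_send_gen))
		requeue_send_worker();	/* no newer sender took over */
}
```
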
566 struct rds_message *rds_send_get_message(struct rds_connection *conn, in rds_send_get_message() argument
572 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_get_message()
574 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_get_message()
582 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) { in rds_send_get_message()
591 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_get_message()
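
rds_send_get_message() scans c_retrans first and then c_send_queue for the message that owns a given operation, and it must take its reference before c_lock is dropped, since the lock is all that pins the message to the list. A userspace model, matching on a plain id rather than the kernel's op pointer:

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct msg {
	struct msg *next;
	atomic_int refcount;
	int id;				/* stands in for matching on the op */
};

struct msg *get_message(pthread_mutex_t *c_lock, struct msg *retrans,
			struct msg *send_queue, int id)
{
	struct msg *m, *found = NULL;

	pthread_mutex_lock(c_lock);
	for (m = retrans; m && !found; m = m->next)	/* retransmits first */
		if (m->id == id)
			found = m;
	for (m = send_queue; m && !found; m = m->next)
		if (m->id == id)
			found = m;
	if (found)
		atomic_fetch_add(&found->refcount, 1);	/* take the ref while
							 * the lock pins it */
	pthread_mutex_unlock(c_lock);
	return found;
}
```
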
687 void rds_send_drop_acked(struct rds_connection *conn, u64 ack, in rds_send_drop_acked() argument
694 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_drop_acked()
696 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_drop_acked()
708 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_drop_acked()
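
rds_send_drop_acked() shows the collect-then-complete idiom: under c_lock, unhook every retransmit covered by the cumulative ACK onto a private list, stopping at the first message that is not (the list is sequence-ordered), and only run completions after the lock is dropped. A compilable model:

```c
#include <pthread.h>
#include <stdint.h>

struct msg {
	struct msg *next;
	uint64_t seq;
};

static void complete_msg(struct msg *m)
{
	/* socket wakeups / notifier callbacks happen here, unlocked */
}

void drop_acked(pthread_mutex_t *c_lock, struct msg **retrans, uint64_t ack)
{
	struct msg *done = NULL, **tail = &done, *m;

	pthread_mutex_lock(c_lock);
	/* the retransmit list is sequence-ordered, so stop at the first
	 * message the cumulative ACK does not cover */
	while ((m = *retrans) && m->seq <= ack) {
		*retrans = m->next;
		m->next = NULL;
		*tail = m;
		tail = &m->next;
	}
	pthread_mutex_unlock(c_lock);

	while (done) {			/* completions run outside c_lock */
		m = done;
		done = m->next;
		complete_msg(m);
	}
}
```
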
718 struct rds_connection *conn; in rds_send_drop_to() local
746 conn = rm->m_inc.i_conn; in rds_send_drop_to()
748 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_drop_to()
755 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_drop_to()
762 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_drop_to()
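
In rds_send_drop_to() the connection is a local recovered per message (conn = rm->m_inc.i_conn), because a socket's queued messages can belong to different connections; each message is unhooked under its own connection's c_lock, and the two unlock sites suggest an early-out branch. A hedged sketch of the per-message step, with simplified stand-in types:

```c
#include <pthread.h>

struct conn {
	pthread_mutex_t c_lock;
};

struct msg {
	struct conn *i_conn;		/* set when the message was queued */
	int on_conn_list;
};

static void drop_one(struct msg *rm)
{
	struct conn *conn = rm->i_conn;	/* the lock lives in the message's
					 * own connection */

	pthread_mutex_lock(&conn->c_lock);
	if (rm->on_conn_list)		/* skip it if transmit already
					 * pulled it off the queue */
		rm->on_conn_list = 0;	/* unhook from conn's send queue */
	pthread_mutex_unlock(&conn->c_lock);
}
```
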
810 static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn, in rds_send_queue_rm() argument
853 rm->m_inc.i_conn = conn; in rds_send_queue_rm()
856 spin_lock(&conn->c_lock); in rds_send_queue_rm()
857 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++); in rds_send_queue_rm()
858 list_add_tail(&rm->m_conn_item, &conn->c_send_queue); in rds_send_queue_rm()
860 spin_unlock(&conn->c_lock); in rds_send_queue_rm()
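
The detail worth noticing in rds_send_queue_rm() is that the sequence stamp (c_next_tx_seq++) and the list_add_tail() happen in one c_lock critical section, so h_sequence order and queue order can never disagree; note also that the listing shows plain spin_lock() here rather than the irqsave variants used elsewhere. A minimal model of the stamp-and-append step (queue_tail starts out pointing at queue_head):

```c
#include <pthread.h>
#include <stdint.h>

struct msg {
	struct msg *next;
	uint64_t seq;			/* h_sequence in the real header */
};

struct conn {
	pthread_mutex_t c_lock;
	uint64_t next_tx_seq;
	struct msg *queue_head;
	struct msg **queue_tail;
};

void queue_rm(struct conn *c, struct msg *rm)
{
	pthread_mutex_lock(&c->c_lock);
	rm->seq = c->next_tx_seq++;	/* stamp and append in one critical
					 * section, so sequence order and
					 * queue order cannot disagree */
	rm->next = NULL;
	*c->queue_tail = rm;
	c->queue_tail = &rm->next;
	pthread_mutex_unlock(&c->c_lock);
}
```
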
987 struct rds_connection *conn; in rds_sendmsg() local
1058 conn = rs->rs_conn; in rds_sendmsg()
1060 conn = rds_conn_create_outgoing(sock_net(sock->sk), in rds_sendmsg()
1064 if (IS_ERR(conn)) { in rds_sendmsg()
1065 ret = PTR_ERR(conn); in rds_sendmsg()
1068 rs->rs_conn = conn; in rds_sendmsg()
1076 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { in rds_sendmsg()
1078 &rm->rdma, conn->c_trans->xmit_rdma); in rds_sendmsg()
1083 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) { in rds_sendmsg()
1085 &rm->atomic, conn->c_trans->xmit_atomic); in rds_sendmsg()
1090 rds_conn_connect_if_down(conn); in rds_sendmsg()
1092 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); in rds_sendmsg()
1098 while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, in rds_sendmsg()
1108 rds_send_queue_rm(rs, conn, rm, in rds_sendmsg()
1129 ret = rds_send_xmit(conn); in rds_sendmsg()
1131 queue_delayed_work(rds_wq, &conn->c_send_w, 1); in rds_sendmsg()
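
The rds_sendmsg() matches trace the full send path: reuse the cached rs->rs_conn or create an outgoing connection, reject messages whose RDMA or atomic ops the transport cannot transmit, kick the connection up if needed, wait for congestion room, queue the message, then try to drain inline. The tail is the interesting bit: the direct rds_send_xmit() call falls back to the workqueue only when it could not finish. A sketch, with schedule_send_worker() as a hypothetical stand-in for queue_delayed_work(rds_wq, &conn->c_send_w, 1):

```c
#include <errno.h>

struct conn;

extern int send_xmit(struct conn *conn);	/* inline drain attempt */
extern void schedule_send_worker(struct conn *conn); /* hypothetical stand-in
						      * for the worker kick */

void sendmsg_tail(struct conn *conn)
{
	int ret = send_xmit(conn);

	/* defer to the worker only when the inline attempt could not
	 * finish; the kernel tests the xmit return for -ENOMEM and
	 * -EAGAIN here */
	if (ret == -ENOMEM || ret == -EAGAIN)
		schedule_send_worker(conn);
}
```
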
1152 rds_send_pong(struct rds_connection *conn, __be16 dport) in rds_send_pong() argument
1164 rm->m_daddr = conn->c_faddr; in rds_send_pong()
1167 rds_conn_connect_if_down(conn); in rds_send_pong()
1169 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL); in rds_send_pong()
1173 spin_lock_irqsave(&conn->c_lock, flags); in rds_send_pong()
1174 list_add_tail(&rm->m_conn_item, &conn->c_send_queue); in rds_send_pong()
1177 rm->m_inc.i_conn = conn; in rds_send_pong()
1180 conn->c_next_tx_seq); in rds_send_pong()
1181 conn->c_next_tx_seq++; in rds_send_pong()
1182 spin_unlock_irqrestore(&conn->c_lock, flags); in rds_send_pong()
1188 queue_delayed_work(rds_wq, &conn->c_send_w, 1); in rds_send_pong()
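
rds_send_pong() bypasses rds_send_queue_rm(): it aims a zero-payload reply back at the peer (m_daddr = conn->c_faddr), then stamps and enqueues it inside its own spin_lock_irqsave section before kicking the send worker. A model in the same shape as the queue_rm sketch above, with kick_send_worker() as a hypothetical stand-in for queue_delayed_work(rds_wq, &conn->c_send_w, 1):

```c
#include <pthread.h>
#include <stdint.h>

struct msg {
	struct msg *next;
	uint64_t seq;
	int is_pong;			/* zero-payload control reply */
	uint16_t dport;
};

struct conn {
	pthread_mutex_t c_lock;
	uint64_t next_tx_seq;
	struct msg *queue_head;
	struct msg **queue_tail;	/* initialized to &queue_head */
};

/* hypothetical stand-in for queue_delayed_work(rds_wq, &conn->c_send_w, 1) */
extern void kick_send_worker(struct conn *c);

void send_pong(struct conn *c, struct msg *rm, uint16_t dport)
{
	rm->is_pong = 1;
	rm->dport = dport;

	pthread_mutex_lock(&c->c_lock);
	rm->seq = c->next_tx_seq++;	/* same stamp-and-queue discipline
					 * as rds_send_queue_rm() */
	rm->next = NULL;
	*c->queue_tail = rm;
	c->queue_tail = &rm->next;
	pthread_mutex_unlock(&c->c_lock);

	kick_send_worker(c);
}
```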