Lines matching refs:rm in net/rds/send.c (Linux kernel RDS)
67 struct rds_message *rm, *tmp; in rds_send_reset() local
71 rm = conn->c_xmit_rm; in rds_send_reset()
77 rds_message_unmapped(rm); in rds_send_reset()
78 rds_message_put(rm); in rds_send_reset()
95 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_reset()
96 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); in rds_send_reset()
97 set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags); in rds_send_reset()
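
The rds_send_reset() lines above drop the half-sent message's reference and then re-mark every message on the retransmit list with RDS_MSG_ACK_REQUIRED and RDS_MSG_RETRANSMITTED via set_bit(). A minimal userspace model of those atomic flag helpers, using C11 atomics (the bit numbers here are assumptions for the sketch, not the kernel's definitions):

#include <stdatomic.h>
#include <stdio.h>

#define RDS_MSG_ACK_REQUIRED   2   /* bit numbers assumed for the sketch */
#define RDS_MSG_RETRANSMITTED  3

static void set_bit(int nr, atomic_ulong *addr)
{
    atomic_fetch_or(addr, 1UL << nr);
}

static int test_and_clear_bit(int nr, atomic_ulong *addr)
{
    unsigned long old = atomic_fetch_and(addr, ~(1UL << nr));
    return (old >> nr) & 1;
}

int main(void)
{
    atomic_ulong m_flags = 0;

    /* Reset path: mark a queued message for retransmission. */
    set_bit(RDS_MSG_ACK_REQUIRED, &m_flags);
    set_bit(RDS_MSG_RETRANSMITTED, &m_flags);
    printf("retransmitted: %d\n",
           test_and_clear_bit(RDS_MSG_RETRANSMITTED, &m_flags));
    return 0;
}
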
138 struct rds_message *rm; in rds_send_xmit() local
193 rm = conn->c_xmit_rm; in rds_send_xmit()
199 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) { in rds_send_xmit()
200 rm = rds_cong_update_alloc(conn); in rds_send_xmit()
201 if (IS_ERR(rm)) { in rds_send_xmit()
202 ret = PTR_ERR(rm); in rds_send_xmit()
205 rm->data.op_active = 1; in rds_send_xmit()
207 conn->c_xmit_rm = rm; in rds_send_xmit()
217 if (!rm) { in rds_send_xmit()
233 rm = list_entry(conn->c_send_queue.next, in rds_send_xmit()
236 rds_message_addref(rm); in rds_send_xmit()
242 list_move_tail(&rm->m_conn_item, &conn->c_retrans); in rds_send_xmit()
247 if (!rm) in rds_send_xmit()
257 if (rm->rdma.op_active && in rds_send_xmit()
258 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) { in rds_send_xmit()
260 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) in rds_send_xmit()
261 list_move(&rm->m_conn_item, &to_be_dropped); in rds_send_xmit()
267 len = ntohl(rm->m_inc.i_hdr.h_len); in rds_send_xmit()
270 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); in rds_send_xmit()
280 conn->c_xmit_rm = rm; in rds_send_xmit()
284 if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) { in rds_send_xmit()
285 rm->m_final_op = &rm->rdma; in rds_send_xmit()
289 set_bit(RDS_MSG_MAPPED, &rm->m_flags); in rds_send_xmit()
290 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); in rds_send_xmit()
292 clear_bit(RDS_MSG_MAPPED, &rm->m_flags); in rds_send_xmit()
293 wake_up_interruptible(&rm->m_flush_wait); in rds_send_xmit()
300 if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) { in rds_send_xmit()
301 rm->m_final_op = &rm->atomic; in rds_send_xmit()
305 set_bit(RDS_MSG_MAPPED, &rm->m_flags); in rds_send_xmit()
306 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic); in rds_send_xmit()
308 clear_bit(RDS_MSG_MAPPED, &rm->m_flags); in rds_send_xmit()
309 wake_up_interruptible(&rm->m_flush_wait); in rds_send_xmit()
323 if (rm->data.op_nents == 0) { in rds_send_xmit()
327 ops_present = (rm->atomic.op_active || rm->rdma.op_active); in rds_send_xmit()
328 if (rm->atomic.op_active && !rm->atomic.op_silent) in rds_send_xmit()
330 if (rm->rdma.op_active && !rm->rdma.op_silent) in rds_send_xmit()
334 && !rm->m_rdma_cookie) in rds_send_xmit()
335 rm->data.op_active = 0; in rds_send_xmit()
338 if (rm->data.op_active && !conn->c_xmit_data_sent) { in rds_send_xmit()
339 rm->m_final_op = &rm->data; in rds_send_xmit()
340 ret = conn->c_trans->xmit(conn, rm, in rds_send_xmit()
355 sg = &rm->data.op_sg[conn->c_xmit_sg]; in rds_send_xmit()
366 conn->c_xmit_sg == rm->data.op_nents); in rds_send_xmit()
371 (conn->c_xmit_sg == rm->data.op_nents)) in rds_send_xmit()
380 if (!rm->data.op_active || conn->c_xmit_data_sent) { in rds_send_xmit()
389 rds_message_put(rm); in rds_send_xmit()
401 list_for_each_entry(rm, &to_be_dropped, m_conn_item) in rds_send_xmit()
402 rds_message_put(rm); in rds_send_xmit()
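
Taken together, the rds_send_xmit() lines trace the core transmit loop: take a reference on the head of c_send_queue, move the entry to c_retrans, then emit the message's sub-operations in a fixed order (RDMA op, then atomic op, then the data payload), with per-connection c_xmit_*_sent flags allowing the loop to resume after a partial send. A compact userspace model of that ordering (struct and field names are assumptions for the sketch):

#include <stdbool.h>
#include <stdio.h>

struct msg { bool has_rdma, has_atomic, has_data; };
struct conn_progress { bool rdma_sent, atomic_sent, data_sent; };

static bool xmit_one(struct msg *rm, struct conn_progress *c)
{
    if (rm->has_rdma && !c->rdma_sent) {
        puts("xmit rdma op");       /* conn->c_trans->xmit_rdma() */
        c->rdma_sent = true;
    }
    if (rm->has_atomic && !c->atomic_sent) {
        puts("xmit atomic op");     /* conn->c_trans->xmit_atomic() */
        c->atomic_sent = true;
    }
    if (rm->has_data && !c->data_sent) {
        puts("xmit data");          /* conn->c_trans->xmit() */
        c->data_sent = true;
    }
    /* The message is finished once its data (or last op) is out. */
    return !rm->has_data || c->data_sent;
}

int main(void)
{
    struct msg rm = { .has_rdma = true, .has_data = true };
    struct conn_progress c = { 0 };
    printf("done: %d\n", xmit_one(&rm, &c));
    return 0;
}
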
437 static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm) in rds_send_sndbuf_remove() argument
439 u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len); in rds_send_sndbuf_remove()
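
rds_send_sndbuf_remove() credits the message's bytes back to the socket's send buffer; the length is read from the wire header with be32_to_cpu() because it is stored big-endian. The userspace equivalent of that conversion is ntohl():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t h_len_wire = htonl(4096); /* as stored in the header */
    uint32_t len = ntohl(h_len_wire);  /* be32_to_cpu() analogue  */
    printf("len = %u\n", len);         /* 4096 on any host        */
    return 0;
}
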
450 static inline int rds_send_is_acked(struct rds_message *rm, u64 ack, in rds_send_is_acked() argument
454 return is_acked(rm, ack); in rds_send_is_acked()
455 return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack; in rds_send_is_acked()
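
rds_send_is_acked() prefers a transport-supplied is_acked() callback and falls back to comparing the message's big-endian sequence number against the cumulative ack. A sketch of that predicate with deliberately simplified types:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct msg { uint64_t seq; };
typedef bool (*is_acked_fn)(struct msg *m, uint64_t ack);

static bool send_is_acked(struct msg *m, uint64_t ack, is_acked_fn cb)
{
    if (cb)
        return cb(m, ack); /* transport-specific notion of "acked" */
    return m->seq <= ack;  /* default: cumulative sequence ack     */
}

int main(void)
{
    struct msg m = { .seq = 41 };
    return send_is_acked(&m, 42, NULL) ? 0 : 1;
}
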
464 void rds_rdma_send_complete(struct rds_message *rm, int status) in rds_rdma_send_complete() argument
471 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_rdma_send_complete()
473 ro = &rm->rdma; in rds_rdma_send_complete()
474 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) && in rds_rdma_send_complete()
477 rs = rm->m_rs; in rds_rdma_send_complete()
488 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_rdma_send_complete()
500 void rds_atomic_send_complete(struct rds_message *rm, int status) in rds_atomic_send_complete() argument
507 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_atomic_send_complete()
509 ao = &rm->atomic; in rds_atomic_send_complete()
510 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) in rds_atomic_send_complete()
513 rs = rm->m_rs; in rds_atomic_send_complete()
524 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_atomic_send_complete()
539 __rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status) in __rds_send_complete() argument
544 ro = &rm->rdma; in __rds_send_complete()
551 ao = &rm->atomic; in __rds_send_complete()
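
rds_rdma_send_complete(), rds_atomic_send_complete(), and __rds_send_complete() share one shape: under rm->m_rs_lock, check that the message is still on a socket, detach the op's notifier, record the completion status, and hand the notifier to the owning socket. A pthread-based model of that handoff (struct layout and names are assumptions for the sketch):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct notifier { int n_status; };
struct op { struct notifier *op_notifier; };

static pthread_mutex_t m_rs_lock = PTHREAD_MUTEX_INITIALIZER;

static struct notifier *complete_op(struct op *op, int status)
{
    struct notifier *n;

    pthread_mutex_lock(&m_rs_lock);
    n = op->op_notifier;
    if (n) {
        n->n_status = status;   /* completion status for the app */
        op->op_notifier = NULL; /* ownership moves to the socket */
    }
    pthread_mutex_unlock(&m_rs_lock);
    return n;                   /* caller queues it for delivery */
}

int main(void)
{
    struct notifier note = { 0 };
    struct op op = { .op_notifier = &note };
    struct notifier *n = complete_op(&op, 0);
    printf("status %d\n", n ? n->n_status : -1);
    return 0;
}
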
569 struct rds_message *rm, *tmp, *found = NULL; in rds_send_get_message() local
574 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_get_message()
575 if (&rm->rdma == op) { in rds_send_get_message()
576 atomic_inc(&rm->m_refcount); in rds_send_get_message()
577 found = rm; in rds_send_get_message()
582 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) { in rds_send_get_message()
583 if (&rm->rdma == op) { in rds_send_get_message()
584 atomic_inc(&rm->m_refcount); in rds_send_get_message()
585 found = rm; in rds_send_get_message()
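
rds_send_get_message() scans the retransmit list and then the send queue for the message that owns a given RDMA op, bumping m_refcount while the list lock is still held so the message cannot be freed between lookup and use. The same pin-before-unlock pattern over a plain singly linked list:

#include <stdatomic.h>
#include <stddef.h>

struct rdma_op { int op_active; };
struct msg {
    atomic_int m_refcount;
    struct rdma_op rdma;
    struct msg *next;
};

static struct msg *find_by_op(struct msg *head, struct rdma_op *op)
{
    for (struct msg *m = head; m; m = m->next) {
        if (&m->rdma == op) {
            atomic_fetch_add(&m->m_refcount, 1); /* pin before unlock */
            return m;
        }
    }
    return NULL;
}

int main(void)
{
    struct msg a = { .m_refcount = 1 };
    return find_by_op(&a, &a.rdma) ? 0 : 1;
}
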
609 struct rds_message *rm; in rds_send_remove_from_sock() local
614 rm = list_entry(messages->next, struct rds_message, in rds_send_remove_from_sock()
616 list_del_init(&rm->m_conn_item); in rds_send_remove_from_sock()
628 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_send_remove_from_sock()
629 if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) in rds_send_remove_from_sock()
632 if (rs != rm->m_rs) { in rds_send_remove_from_sock()
637 rs = rm->m_rs; in rds_send_remove_from_sock()
645 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { in rds_send_remove_from_sock()
646 struct rm_rdma_op *ro = &rm->rdma; in rds_send_remove_from_sock()
649 list_del_init(&rm->m_sock_item); in rds_send_remove_from_sock()
650 rds_send_sndbuf_remove(rs, rm); in rds_send_remove_from_sock()
659 rm->rdma.op_notifier = NULL; in rds_send_remove_from_sock()
662 rm->m_rs = NULL; in rds_send_remove_from_sock()
667 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_send_remove_from_sock()
668 rds_message_put(rm); in rds_send_remove_from_sock()
670 rds_message_put(rm); in rds_send_remove_from_sock()
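
Note the two rds_message_put() calls at the end of rds_send_remove_from_sock(): one drops the reference the connection's queue held, the other drops the reference taken when the message was marked RDS_MSG_ON_SOCK. A self-contained refcount model of that lifetime:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct msg { atomic_int refcount; };

static struct msg *msg_alloc(void)
{
    struct msg *m = malloc(sizeof(*m));
    if (!m)
        abort();
    atomic_init(&m->refcount, 1);
    return m;
}

static void msg_get(struct msg *m) { atomic_fetch_add(&m->refcount, 1); }

static void msg_put(struct msg *m)
{
    if (atomic_fetch_sub(&m->refcount, 1) == 1) {
        puts("freed");
        free(m);
    }
}

int main(void)
{
    struct msg *m = msg_alloc(); /* ref 1: the connection's queue  */
    msg_get(m);                  /* ref 2: RDS_MSG_ON_SOCK         */
    msg_put(m);                  /* drop the socket reference      */
    msg_put(m);                  /* drop the queue reference: free */
    return 0;
}
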
690 struct rds_message *rm, *tmp; in rds_send_drop_acked() local
696 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) { in rds_send_drop_acked()
697 if (!rds_send_is_acked(rm, ack, is_acked)) in rds_send_drop_acked()
700 list_move(&rm->m_conn_item, &list); in rds_send_drop_acked()
701 clear_bit(RDS_MSG_ON_CONN, &rm->m_flags); in rds_send_drop_acked()
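
rds_send_drop_acked() walks c_retrans with list_for_each_entry_safe(), which caches the next node so the current entry can be moved onto a private list mid-iteration; the collected messages are then released outside the lock. The same unlink-while-walking idea with a plain singly linked list:

#include <stddef.h>
#include <stdio.h>

struct node { int seq; struct node *next; };

static struct node *drop_acked(struct node **head, int ack)
{
    struct node *dropped = NULL, **pp = head;

    while (*pp) {
        struct node *n = *pp;
        if (n->seq <= ack) {          /* acked: unlink and collect */
            *pp = n->next;
            n->next = dropped;
            dropped = n;
        } else {
            pp = &n->next;
        }
    }
    return dropped;                   /* released outside the lock */
}

int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct node *head = &a;
    for (struct node *n = drop_acked(&head, 2); n; n = n->next)
        printf("dropped seq %d\n", n->seq);
    return 0;
}
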
717 struct rds_message *rm, *tmp; in rds_send_drop_to() local
725 list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) { in rds_send_drop_to()
726 if (dest && (dest->sin_addr.s_addr != rm->m_daddr || in rds_send_drop_to()
727 dest->sin_port != rm->m_inc.i_hdr.h_dport)) in rds_send_drop_to()
730 list_move(&rm->m_sock_item, &list); in rds_send_drop_to()
731 rds_send_sndbuf_remove(rs, rm); in rds_send_drop_to()
732 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); in rds_send_drop_to()
744 list_for_each_entry(rm, &list, m_sock_item) { in rds_send_drop_to()
746 conn = rm->m_inc.i_conn; in rds_send_drop_to()
754 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { in rds_send_drop_to()
756 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_send_drop_to()
757 rm->m_rs = NULL; in rds_send_drop_to()
758 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_send_drop_to()
761 list_del_init(&rm->m_conn_item); in rds_send_drop_to()
768 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_send_drop_to()
771 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED); in rds_send_drop_to()
774 rm->m_rs = NULL; in rds_send_drop_to()
775 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_send_drop_to()
777 rds_message_put(rm); in rds_send_drop_to()
783 rm = list_entry(list.next, struct rds_message, m_sock_item); in rds_send_drop_to()
784 list_del_init(&rm->m_sock_item); in rds_send_drop_to()
785 rds_message_wait(rm); in rds_send_drop_to()
792 spin_lock_irqsave(&rm->m_rs_lock, flags); in rds_send_drop_to()
795 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED); in rds_send_drop_to()
798 rm->m_rs = NULL; in rds_send_drop_to()
799 spin_unlock_irqrestore(&rm->m_rs_lock, flags); in rds_send_drop_to()
801 rds_message_put(rm); in rds_send_drop_to()
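
In rds_send_drop_to(), the destination filter compares rm->m_daddr and the header's h_dport against the caller's sockaddr_in without any byte swapping: both sides are kept in network byte order, so plain equality is endian-safe. A small illustration (the address and port values here are arbitrary):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct dest { uint32_t addr_be; uint16_t port_be; };

static int dest_matches(const struct dest *d,
                        uint32_t m_daddr_be, uint16_t h_dport_be)
{
    /* No ntohl()/ntohs(): both sides are big-endian, equality holds. */
    return d->addr_be == m_daddr_be && d->port_be == h_dport_be;
}

int main(void)
{
    struct dest d = { inet_addr("192.0.2.1"), htons(4047) };
    printf("match: %d\n",
           dest_matches(&d, inet_addr("192.0.2.1"), htons(4047)));
    return 0;
}
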
811 struct rds_message *rm, __be16 sport, in rds_send_queue_rm() argument
820 len = be32_to_cpu(rm->m_inc.i_hdr.h_len); in rds_send_queue_rm()
843 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); in rds_send_queue_rm()
845 list_add_tail(&rm->m_sock_item, &rs->rs_send_queue); in rds_send_queue_rm()
846 set_bit(RDS_MSG_ON_SOCK, &rm->m_flags); in rds_send_queue_rm()
847 rds_message_addref(rm); in rds_send_queue_rm()
848 rm->m_rs = rs; in rds_send_queue_rm()
852 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0); in rds_send_queue_rm()
853 rm->m_inc.i_conn = conn; in rds_send_queue_rm()
854 rds_message_addref(rm); in rds_send_queue_rm()
857 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++); in rds_send_queue_rm()
858 list_add_tail(&rm->m_conn_item, &conn->c_send_queue); in rds_send_queue_rm()
859 set_bit(RDS_MSG_ON_CONN, &rm->m_flags); in rds_send_queue_rm()
863 rm, len, rs, rs->rs_snd_bytes, in rds_send_queue_rm()
864 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence)); in rds_send_queue_rm()
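
rds_send_queue_rm() enrolls the message on two lists, taking a reference for each: the socket's rs_send_queue (RDS_MSG_ON_SOCK) and the connection's c_send_queue (RDS_MSG_ON_CONN). The transmit sequence number is claimed from c_next_tx_seq under the connection lock, which is what keeps the queue strictly ordered. A pthread model of that step (names assumed):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t c_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t c_next_tx_seq = 1;

static uint64_t assign_seq(void)
{
    pthread_mutex_lock(&c_lock);
    uint64_t seq = c_next_tx_seq++;   /* h_sequence, sent big-endian */
    pthread_mutex_unlock(&c_lock);
    return seq;
}

int main(void)
{
    printf("seq %llu, seq %llu\n",
           (unsigned long long)assign_seq(),
           (unsigned long long)assign_seq());
    return 0;
}
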
931 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm, in rds_cmsg_send() argument
949 ret = rds_cmsg_rdma_args(rs, rm, cmsg); in rds_cmsg_send()
953 ret = rds_cmsg_rdma_dest(rs, rm, cmsg); in rds_cmsg_send()
957 ret = rds_cmsg_rdma_map(rs, rm, cmsg); in rds_cmsg_send()
965 ret = rds_cmsg_atomic(rs, rm, cmsg); in rds_cmsg_send()
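
rds_cmsg_send() iterates the control messages supplied to sendmsg() and dispatches on cmsg_type to the RDMA args/dest/map and atomic handlers, skipping any cmsg whose level is not SOL_RDS. The standard cmsg walk, in userspace form:

#include <sys/socket.h>
#include <stdio.h>

#ifndef SOL_RDS
#define SOL_RDS 276           /* socket level used by RDS */
#endif

static void walk_cmsgs(struct msghdr *msg)
{
    for (struct cmsghdr *c = CMSG_FIRSTHDR(msg); c;
         c = CMSG_NXTHDR(msg, c)) {
        if (c->cmsg_level != SOL_RDS)
            continue;         /* the kernel skips non-RDS levels too */
        switch (c->cmsg_type) {
        /* RDS_CMSG_RDMA_ARGS, _DEST, _MAP, atomic types, ... */
        default:
            printf("cmsg type %d\n", c->cmsg_type);
        }
    }
}

int main(void)
{
    struct msghdr msg = { 0 };  /* no control data: loop body is skipped */
    walk_cmsgs(&msg);
    return 0;
}
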
986 struct rds_message *rm = NULL; in rds_sendmsg() local
1034 rm = rds_message_alloc(ret, GFP_KERNEL); in rds_sendmsg()
1035 if (!rm) { in rds_sendmsg()
1042 rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE)); in rds_sendmsg()
1043 if (!rm->data.op_sg) { in rds_sendmsg()
1047 ret = rds_message_copy_from_user(rm, &msg->msg_iter); in rds_sendmsg()
1051 rm->data.op_active = 1; in rds_sendmsg()
1053 rm->m_daddr = daddr; in rds_sendmsg()
1072 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr); in rds_sendmsg()
1076 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { in rds_sendmsg()
1078 &rm->rdma, conn->c_trans->xmit_rdma); in rds_sendmsg()
1083 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) { in rds_sendmsg()
1085 &rm->atomic, conn->c_trans->xmit_atomic); in rds_sendmsg()
1098 while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, in rds_sendmsg()
1108 rds_send_queue_rm(rs, conn, rm, in rds_sendmsg()
1133 rds_message_put(rm); in rds_sendmsg()
1141 rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1); in rds_sendmsg()
1143 if (rm) in rds_sendmsg()
1144 rds_message_put(rm); in rds_sendmsg()
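
In rds_sendmsg(), the scatterlist array is sized as ceil(payload_len, PAGE_SIZE): one entry per page of payload. The ceil() helper is ordinary rounding-up integer division:

#include <stdio.h>

#define PAGE_SIZE 4096
#define ceil(x, y) (((x) + (y) - 1) / (y))

int main(void)
{
    unsigned long payload_len = 10000;
    printf("sg entries: %lu\n", ceil(payload_len, PAGE_SIZE)); /* 3 */
    return 0;
}
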
1154 struct rds_message *rm; in rds_send_pong() local
1158 rm = rds_message_alloc(0, GFP_ATOMIC); in rds_send_pong()
1159 if (!rm) { in rds_send_pong()
1164 rm->m_daddr = conn->c_faddr; in rds_send_pong()
1165 rm->data.op_active = 1; in rds_send_pong()
1174 list_add_tail(&rm->m_conn_item, &conn->c_send_queue); in rds_send_pong()
1175 set_bit(RDS_MSG_ON_CONN, &rm->m_flags); in rds_send_pong()
1176 rds_message_addref(rm); in rds_send_pong()
1177 rm->m_inc.i_conn = conn; in rds_send_pong()
1179 rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport, in rds_send_pong()
1190 rds_message_put(rm); in rds_send_pong()
1194 if (rm) in rds_send_pong()
1195 rds_message_put(rm); in rds_send_pong()
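
rds_send_pong() allocates with GFP_ATOMIC since it can run in contexts that must not sleep, and its error path only drops the reference when rm is non-NULL (lines 1194-1195 above). A userspace model of that guard shape; the success path deliberately keeps the reference, since it now belongs to the send queue:

#include <errno.h>
#include <stdlib.h>

struct msg { int refcount; };

static int send_pong(void)
{
    struct msg *rm = calloc(1, sizeof(*rm)); /* GFP_ATOMIC analogue */
    int ret = 0;

    if (!rm) {
        ret = -ENOMEM;
        goto out;
    }
    rm->refcount = 1;
    /* ... populate the header, queue on the connection, kick the
     * send worker; the queue now owns the reference, so no put. */
    return 0;
out:
    /* Later error paths in the real function reach here with rm
     * set; the NULL guard mirrors that shape. */
    if (rm)
        free(rm);             /* rds_message_put() analogue */
    return ret;
}

int main(void) { return send_pong() ? 1 : 0; }
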