Lines matching refs: priv (references to the priv argument/local in drivers/infiniband/ulp/ipoib/ipoib_cm.c)

80 static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,  in ipoib_cm_dma_unmap_rx()  argument
85 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); in ipoib_cm_dma_unmap_rx()
88 ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); in ipoib_cm_dma_unmap_rx()
93 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_post_receive_srq() local
97 priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV; in ipoib_cm_post_receive_srq()
99 for (i = 0; i < priv->cm.num_frags; ++i) in ipoib_cm_post_receive_srq()
100 priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i]; in ipoib_cm_post_receive_srq()
102 ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr); in ipoib_cm_post_receive_srq()
104 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret); in ipoib_cm_post_receive_srq()
105 ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1, in ipoib_cm_post_receive_srq()
106 priv->cm.srq_ring[id].mapping); in ipoib_cm_post_receive_srq()
107 dev_kfree_skb_any(priv->cm.srq_ring[id].skb); in ipoib_cm_post_receive_srq()
108 priv->cm.srq_ring[id].skb = NULL; in ipoib_cm_post_receive_srq()
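
The ipoib_cm_post_receive_srq() hits above show the usual SRQ repost pattern: refresh the scatter-list addresses for ring slot id, encode the slot index plus op-flag bits in wr_id, then hand the shared receive WR to ib_post_srq_recv(); on failure the slot's buffer is unmapped and freed. A minimal sketch of that pattern, where the my_* names, the MY_OP_RECV flag and the 4-entry mapping array are illustrative stand-ins rather than the driver's definitions:

    #include <rdma/ib_verbs.h>

    #define MY_OP_RECV (1ULL << 31)     /* stand-in for the driver's wr_id flag bits */

    struct my_rx_buf {
        u64 mapping[4];                 /* DMA addresses: head buffer + frag pages */
    };

    /*
     * Repost ring slot 'id' to the SRQ.  'sge' must be the array that
     * wr->sg_list already points at (set up once at init time), so only
     * the per-slot addresses need refreshing before the post.
     */
    static int my_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
                                struct ib_sge *sge, struct my_rx_buf *ring,
                                int id, int num_frags)
    {
        struct ib_recv_wr *bad_wr;
        int i;

        wr->wr_id = id | MY_OP_RECV;    /* slot index comes back in the completion */
        for (i = 0; i < num_frags; ++i)
            sge[i].addr = ring[id].mapping[i];

        return ib_post_srq_recv(srq, wr, &bad_wr);
    }
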
119 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_post_receive_nonsrq() local
130 ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret); in ipoib_cm_post_receive_nonsrq()
131 ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, in ipoib_cm_post_receive_nonsrq()
146 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_alloc_rx_skb() local
160 mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, in ipoib_cm_alloc_rx_skb()
162 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) { in ipoib_cm_alloc_rx_skb()
174 mapping[i + 1] = ib_dma_map_page(priv->ca, page, in ipoib_cm_alloc_rx_skb()
176 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1]))) in ipoib_cm_alloc_rx_skb()
185 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); in ipoib_cm_alloc_rx_skb()
188 ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE); in ipoib_cm_alloc_rx_skb()
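
ipoib_cm_alloc_rx_skb() maps the skb's linear head with ib_dma_map_single() and each attached page with ib_dma_map_page(), checking ib_dma_mapping_error() after every call and unwinding whatever was already mapped if a later mapping fails (the partial-unmap lines 185/188 above). A hedged sketch of that map-with-rollback step, with head_size, nr_pages and the my_* name as caller-supplied stand-ins for the driver's constants:

    #include <linux/mm.h>
    #include <linux/skbuff.h>
    #include <rdma/ib_verbs.h>

    /*
     * Map an skb whose linear part is 'head_size' bytes and which carries
     * 'nr_pages' full pages in its frag list.  On error every mapping done
     * so far is undone, mirroring the unwind in ipoib_cm_alloc_rx_skb().
     */
    static int my_map_rx_skb(struct ib_device *ca, struct sk_buff *skb,
                             size_t head_size, int nr_pages, u64 *mapping)
    {
        int i;

        mapping[0] = ib_dma_map_single(ca, skb->data, head_size,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
            return -EIO;

        for (i = 0; i < nr_pages; ++i) {
            struct page *page = skb_frag_page(&skb_shinfo(skb)->frags[i]);

            mapping[i + 1] = ib_dma_map_page(ca, page, 0, PAGE_SIZE,
                                             DMA_FROM_DEVICE);
            if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1])))
                goto unwind;
        }
        return 0;

    unwind:
        while (--i >= 0)
            ib_dma_unmap_page(ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
        ib_dma_unmap_single(ca, mapping[0], head_size, DMA_FROM_DEVICE);
        return -EIO;
    }
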
197 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_free_rx_ring() local
202 ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, in ipoib_cm_free_rx_ring()
210 static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv) in ipoib_cm_start_rx_drain() argument
217 if (list_empty(&priv->cm.rx_flush_list) || in ipoib_cm_start_rx_drain()
218 !list_empty(&priv->cm.rx_drain_list)) in ipoib_cm_start_rx_drain()
225 p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list); in ipoib_cm_start_rx_drain()
227 ipoib_warn(priv, "failed to post drain wr\n"); in ipoib_cm_start_rx_drain()
229 list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list); in ipoib_cm_start_rx_drain()
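
ipoib_cm_start_rx_drain() relies on the drain-WR trick: the QPs on rx_flush_list have already been moved to the error state, so posting one sentinel WR to the first of them immediately generates a flush completion on the shared receive CQ, and seeing that completion proves all receive WRs posted earlier have been flushed too. Only one drain may be outstanding at a time, hence the rx_drain_list check at line 218. A minimal sketch of posting such a sentinel; the MY_DRAIN_WRID name and value are illustrative, not the driver's:

    #include <rdma/ib_verbs.h>

    #define MY_DRAIN_WRID 0xffffffffULL     /* illustrative sentinel id */

    /*
     * Post a sentinel send WR to a QP that is already in the error state;
     * its flush completion on the shared CQ marks the end of the drain.
     */
    static int my_post_drain_wr(struct ib_qp *qp)
    {
        static struct ib_send_wr drain_wr = {
            .opcode = IB_WR_SEND,
            .wr_id  = MY_DRAIN_WRID,
        };
        struct ib_send_wr *bad_wr;

        return ib_post_send(qp, &drain_wr, &bad_wr);
    }
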
235 struct ipoib_dev_priv *priv = netdev_priv(p->dev); in ipoib_cm_rx_event_handler() local
241 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_rx_event_handler()
242 list_move(&p->list, &priv->cm.rx_flush_list); in ipoib_cm_rx_event_handler()
244 ipoib_cm_start_rx_drain(priv); in ipoib_cm_rx_event_handler()
245 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_rx_event_handler()
251 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_create_rx_qp() local
254 .send_cq = priv->recv_cq, /* For drain WR */ in ipoib_cm_create_rx_qp()
255 .recv_cq = priv->recv_cq, in ipoib_cm_create_rx_qp()
256 .srq = priv->cm.srq, in ipoib_cm_create_rx_qp()
269 return ib_create_qp(priv->pd, &attr); in ipoib_cm_create_rx_qp()
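
ipoib_cm_create_rx_qp() builds an RC QP whose receive side is backed by the SRQ and whose one-entry send queue exists only so the drain WR above has somewhere to go; send_cq is deliberately pointed at the receive CQ (the "For drain WR" comment at line 254) so the drain completion is seen by the RX polling path. A sketch of that attribute setup; my_create_rx_qp() omits the event handler and the non-SRQ sizing that the driver also handles:

    #include <rdma/ib_verbs.h>

    static struct ib_qp *my_create_rx_qp(struct ib_pd *pd, struct ib_cq *recv_cq,
                                         struct ib_srq *srq, void *context)
    {
        struct ib_qp_init_attr attr = {
            .send_cq = recv_cq,         /* drain completion lands on the RX CQ */
            .recv_cq = recv_cq,
            .srq = srq,                 /* receives come from the shared queue */
            .cap.max_send_wr = 1,       /* room for the drain WR only */
            .cap.max_send_sge = 1,
            .sq_sig_type = IB_SIGNAL_ALL_WR,
            .qp_type = IB_QPT_RC,
            .qp_context = context,
        };

        return ib_create_qp(pd, &attr);
    }
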
276 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_modify_rx_qp() local
283 ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret); in ipoib_cm_modify_rx_qp()
288 ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret); in ipoib_cm_modify_rx_qp()
294 ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); in ipoib_cm_modify_rx_qp()
300 ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); in ipoib_cm_modify_rx_qp()
315 ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); in ipoib_cm_modify_rx_qp()
320 ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); in ipoib_cm_modify_rx_qp()
331 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_init_rx_wr() local
334 for (i = 0; i < priv->cm.num_frags; ++i) in ipoib_cm_init_rx_wr()
335 sge[i].lkey = priv->pd->local_dma_lkey; in ipoib_cm_init_rx_wr()
338 for (i = 1; i < priv->cm.num_frags; ++i) in ipoib_cm_init_rx_wr()
343 wr->num_sge = priv->cm.num_frags; in ipoib_cm_init_rx_wr()
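
ipoib_cm_init_rx_wr() prepares the receive work request that every post reuses: SGE 0 covers the linear head of the skb, each remaining SGE covers one full page, and all of them use the PD's local_dma_lkey, so only the per-buffer addresses change between posts (see the repost sketch further up). A sketch of that setup, with MY_HEAD_SIZE standing in for IPOIB_CM_HEAD_SIZE:

    #include <linux/mm.h>
    #include <rdma/ib_verbs.h>

    #define MY_HEAD_SIZE 64U            /* stand-in for IPOIB_CM_HEAD_SIZE */

    /* Fill a reusable receive WR: head SGE first, then one SGE per page. */
    static void my_init_rx_wr(struct ib_pd *pd, struct ib_recv_wr *wr,
                              struct ib_sge *sge, int num_frags)
    {
        int i;

        for (i = 0; i < num_frags; ++i)
            sge[i].lkey = pd->local_dma_lkey;

        sge[0].length = MY_HEAD_SIZE;
        for (i = 1; i < num_frags; ++i)
            sge[i].length = PAGE_SIZE;

        wr->next = NULL;
        wr->sg_list = sge;
        wr->num_sge = num_frags;
    }
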
349 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_nonsrq_init_rx() local
360 priv->ca->name, ipoib_recvq_size); in ipoib_cm_nonsrq_init_rx()
372 spin_lock_irq(&priv->lock); in ipoib_cm_nonsrq_init_rx()
374 if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) { in ipoib_cm_nonsrq_init_rx()
375 spin_unlock_irq(&priv->lock); in ipoib_cm_nonsrq_init_rx()
380 ++priv->cm.nonsrq_conn_qp; in ipoib_cm_nonsrq_init_rx()
382 spin_unlock_irq(&priv->lock); in ipoib_cm_nonsrq_init_rx()
388 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); in ipoib_cm_nonsrq_init_rx()
394 ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq " in ipoib_cm_nonsrq_init_rx()
408 spin_lock_irq(&priv->lock); in ipoib_cm_nonsrq_init_rx()
409 --priv->cm.nonsrq_conn_qp; in ipoib_cm_nonsrq_init_rx()
410 spin_unlock_irq(&priv->lock); in ipoib_cm_nonsrq_init_rx()
423 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_send_rep() local
427 data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_rep()
443 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_req_handler() local
448 ipoib_dbg(priv, "REQ arrived\n"); in ipoib_cm_req_handler()
476 spin_lock_irq(&priv->lock); in ipoib_cm_req_handler()
477 queue_delayed_work(priv->wq, in ipoib_cm_req_handler()
478 &priv->cm.stale_task, IPOIB_CM_RX_DELAY); in ipoib_cm_req_handler()
483 list_move(&p->list, &priv->cm.passive_ids); in ipoib_cm_req_handler()
484 spin_unlock_irq(&priv->lock); in ipoib_cm_req_handler()
488 ipoib_warn(priv, "failed to send REP: %d\n", ret); in ipoib_cm_req_handler()
490 ipoib_warn(priv, "unable to move qp to error state\n"); in ipoib_cm_req_handler()
505 struct ipoib_dev_priv *priv; in ipoib_cm_rx_handler() local
516 priv = netdev_priv(p->dev); in ipoib_cm_rx_handler()
518 ipoib_warn(priv, "unable to move qp to error state\n"); in ipoib_cm_rx_handler()
560 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_handle_rx_wc() local
571 ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n", in ipoib_cm_handle_rx_wc()
576 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
577 list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); in ipoib_cm_handle_rx_wc()
578 ipoib_cm_start_rx_drain(priv); in ipoib_cm_handle_rx_wc()
579 queue_work(priv->wq, &priv->cm.rx_reap_task); in ipoib_cm_handle_rx_wc()
580 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
582 ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", in ipoib_cm_handle_rx_wc()
590 rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring; in ipoib_cm_handle_rx_wc()
595 ipoib_dbg(priv, "cm recv error " in ipoib_cm_handle_rx_wc()
603 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
604 list_move(&p->list, &priv->cm.rx_reap_list); in ipoib_cm_handle_rx_wc()
605 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
606 queue_work(priv->wq, &priv->cm.rx_reap_task); in ipoib_cm_handle_rx_wc()
614 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
619 list_move(&p->list, &priv->cm.passive_ids); in ipoib_cm_handle_rx_wc()
620 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_handle_rx_wc()
630 ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0], in ipoib_cm_handle_rx_wc()
633 ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0], in ipoib_cm_handle_rx_wc()
651 ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id); in ipoib_cm_handle_rx_wc()
656 ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping); in ipoib_cm_handle_rx_wc()
659 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", in ipoib_cm_handle_rx_wc()
680 ipoib_warn(priv, "ipoib_cm_post_receive_srq failed " in ipoib_cm_handle_rx_wc()
684 &priv->cm.rx_wr, in ipoib_cm_handle_rx_wc()
685 priv->cm.rx_sge, in ipoib_cm_handle_rx_wc()
688 ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed " in ipoib_cm_handle_rx_wc()
694 static inline int post_send(struct ipoib_dev_priv *priv, in post_send() argument
701 ipoib_build_sge(priv, tx_req); in post_send()
703 priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM; in post_send()
705 return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr); in post_send()
710 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_send() local
715 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", in ipoib_cm_send()
723 ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n", in ipoib_cm_send()
736 if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { in ipoib_cm_send()
745 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req); in ipoib_cm_send()
747 ipoib_warn(priv, "post_send failed, error %d\n", rc); in ipoib_cm_send()
749 ipoib_dma_unmap_tx(priv, tx_req); in ipoib_cm_send()
755 if (++priv->tx_outstanding == ipoib_sendq_size) { in ipoib_cm_send()
756 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", in ipoib_cm_send()
759 rc = ib_req_notify_cq(priv->send_cq, in ipoib_cm_send()
762 ipoib_warn(priv, "request notify on send CQ failed\n"); in ipoib_cm_send()
764 ipoib_send_comp_handler(priv->send_cq, dev); in ipoib_cm_send()
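
The tail of ipoib_cm_send() (lines 755-764) shows the TX flow-control convention: when the number of outstanding sends reaches the ring size, stop the kernel net queue and re-arm the send CQ with IB_CQ_REPORT_MISSED_EVENTS; a positive return from ib_req_notify_cq() means completions arrived before the CQ was armed, so the completion handler is invoked directly. A hedged sketch of that step, with my_poll_send_cq standing in for the driver's ipoib_send_comp_handler():

    #include <linux/netdevice.h>
    #include <rdma/ib_verbs.h>

    static void my_tx_ring_full(struct net_device *dev, struct ib_cq *send_cq,
                                void (*my_poll_send_cq)(struct ib_cq *, void *))
    {
        int rc;

        netif_stop_queue(dev);
        rc = ib_req_notify_cq(send_cq,
                              IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
        if (rc < 0)
            pr_warn("request notify on send CQ failed\n");
        else if (rc)
            my_poll_send_cq(send_cq, dev);  /* completions were missed: poll now */
    }
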
771 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_handle_tx_wc() local
777 ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", in ipoib_cm_handle_tx_wc()
781 ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n", in ipoib_cm_handle_tx_wc()
788 ipoib_dma_unmap_tx(priv, tx_req); in ipoib_cm_handle_tx_wc()
799 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && in ipoib_cm_handle_tx_wc()
801 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) in ipoib_cm_handle_tx_wc()
808 ipoib_dbg(priv, "failed cm send event " in ipoib_cm_handle_tx_wc()
812 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_handle_tx_wc()
823 list_move(&tx->list, &priv->cm.reap_list); in ipoib_cm_handle_tx_wc()
824 queue_work(priv->wq, &priv->cm.reap_task); in ipoib_cm_handle_tx_wc()
829 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_handle_tx_wc()
837 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_dev_open() local
843 priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev); in ipoib_cm_dev_open()
844 if (IS_ERR(priv->cm.id)) { in ipoib_cm_dev_open()
845 printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name); in ipoib_cm_dev_open()
846 ret = PTR_ERR(priv->cm.id); in ipoib_cm_dev_open()
850 ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), in ipoib_cm_dev_open()
853 printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name, in ipoib_cm_dev_open()
854 IPOIB_CM_IETF_ID | priv->qp->qp_num); in ipoib_cm_dev_open()
861 ib_destroy_cm_id(priv->cm.id); in ipoib_cm_dev_open()
863 priv->cm.id = NULL; in ipoib_cm_dev_open()
869 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_free_rx_reap_list() local
873 spin_lock_irq(&priv->lock); in ipoib_cm_free_rx_reap_list()
874 list_splice_init(&priv->cm.rx_reap_list, &list); in ipoib_cm_free_rx_reap_list()
875 spin_unlock_irq(&priv->lock); in ipoib_cm_free_rx_reap_list()
881 ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring); in ipoib_cm_free_rx_reap_list()
882 spin_lock_irq(&priv->lock); in ipoib_cm_free_rx_reap_list()
883 --priv->cm.nonsrq_conn_qp; in ipoib_cm_free_rx_reap_list()
884 spin_unlock_irq(&priv->lock); in ipoib_cm_free_rx_reap_list()
892 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_dev_stop() local
897 if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id) in ipoib_cm_dev_stop()
900 ib_destroy_cm_id(priv->cm.id); in ipoib_cm_dev_stop()
901 priv->cm.id = NULL; in ipoib_cm_dev_stop()
903 spin_lock_irq(&priv->lock); in ipoib_cm_dev_stop()
904 while (!list_empty(&priv->cm.passive_ids)) { in ipoib_cm_dev_stop()
905 p = list_entry(priv->cm.passive_ids.next, typeof(*p), list); in ipoib_cm_dev_stop()
906 list_move(&p->list, &priv->cm.rx_error_list); in ipoib_cm_dev_stop()
908 spin_unlock_irq(&priv->lock); in ipoib_cm_dev_stop()
911 ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); in ipoib_cm_dev_stop()
912 spin_lock_irq(&priv->lock); in ipoib_cm_dev_stop()
918 while (!list_empty(&priv->cm.rx_error_list) || in ipoib_cm_dev_stop()
919 !list_empty(&priv->cm.rx_flush_list) || in ipoib_cm_dev_stop()
920 !list_empty(&priv->cm.rx_drain_list)) { in ipoib_cm_dev_stop()
922 ipoib_warn(priv, "RX drain timing out\n"); in ipoib_cm_dev_stop()
927 list_splice_init(&priv->cm.rx_flush_list, in ipoib_cm_dev_stop()
928 &priv->cm.rx_reap_list); in ipoib_cm_dev_stop()
929 list_splice_init(&priv->cm.rx_error_list, in ipoib_cm_dev_stop()
930 &priv->cm.rx_reap_list); in ipoib_cm_dev_stop()
931 list_splice_init(&priv->cm.rx_drain_list, in ipoib_cm_dev_stop()
932 &priv->cm.rx_reap_list); in ipoib_cm_dev_stop()
935 spin_unlock_irq(&priv->lock); in ipoib_cm_dev_stop()
938 spin_lock_irq(&priv->lock); in ipoib_cm_dev_stop()
941 spin_unlock_irq(&priv->lock); in ipoib_cm_dev_stop()
945 cancel_delayed_work(&priv->cm.stale_task); in ipoib_cm_dev_stop()
951 struct ipoib_dev_priv *priv = netdev_priv(p->dev); in ipoib_cm_rep_handler() local
961 ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n", in ipoib_cm_rep_handler()
969 ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); in ipoib_cm_rep_handler()
976 ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); in ipoib_cm_rep_handler()
983 ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); in ipoib_cm_rep_handler()
988 ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); in ipoib_cm_rep_handler()
994 spin_lock_irq(&priv->lock); in ipoib_cm_rep_handler()
999 spin_unlock_irq(&priv->lock); in ipoib_cm_rep_handler()
1004 ipoib_warn(priv, "dev_queue_xmit failed " in ipoib_cm_rep_handler()
1010 ipoib_warn(priv, "failed to send RTU: %d\n", ret); in ipoib_cm_rep_handler()
1018 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_create_tx_qp() local
1020 .send_cq = priv->recv_cq, in ipoib_cm_create_tx_qp()
1021 .recv_cq = priv->recv_cq, in ipoib_cm_create_tx_qp()
1022 .srq = priv->cm.srq, in ipoib_cm_create_tx_qp()
1036 tx_qp = ib_create_qp(priv->pd, &attr); in ipoib_cm_create_tx_qp()
1038 ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n", in ipoib_cm_create_tx_qp()
1039 priv->ca->name); in ipoib_cm_create_tx_qp()
1041 tx_qp = ib_create_qp(priv->pd, &attr); in ipoib_cm_create_tx_qp()
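
ipoib_cm_create_tx_qp() first asks for the QP with IB_QP_CREATE_USE_GFP_NOIO set, so that allocations inside the verbs provider cannot recurse into the block layer; if the device rejects the flag with -EINVAL, it drops the flag and retries with ordinary GFP_KERNEL allocations, which is the warn-and-retry pair at lines 1036-1041 above. A sketch of that retry, assuming attr has otherwise been filled in by the caller (the flag belongs to the kernel generation this listing comes from and was removed later):

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static struct ib_qp *my_create_tx_qp(struct ib_pd *pd,
                                         struct ib_qp_init_attr *attr)
    {
        struct ib_qp *qp;

        attr->create_flags |= IB_QP_CREATE_USE_GFP_NOIO;
        qp = ib_create_qp(pd, attr);
        if (PTR_ERR(qp) == -EINVAL) {       /* device can't honour GFP_NOIO */
            attr->create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
            qp = ib_create_qp(pd, attr);
        }
        return qp;
    }
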
1051 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_send_req() local
1055 data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_req()
1086 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_modify_tx_init() local
1089 ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index); in ipoib_cm_modify_tx_init()
1091 ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret); in ipoib_cm_modify_tx_init()
1097 qp_attr.port_num = priv->port; in ipoib_cm_modify_tx_init()
1102 ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret); in ipoib_cm_modify_tx_init()
1111 struct ipoib_dev_priv *priv = netdev_priv(p->dev); in ipoib_cm_tx_init() local
1117 ipoib_warn(priv, "failed to allocate tx ring\n"); in ipoib_cm_tx_init()
1126 ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret); in ipoib_cm_tx_init()
1130 p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p); in ipoib_cm_tx_init()
1133 ipoib_warn(priv, "failed to create tx cm id: %d\n", ret); in ipoib_cm_tx_init()
1139 ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret); in ipoib_cm_tx_init()
1145 ipoib_warn(priv, "failed to send cm req: %d\n", ret); in ipoib_cm_tx_init()
1149 ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n", in ipoib_cm_tx_init()
1169 struct ipoib_dev_priv *priv = netdev_priv(p->dev); in ipoib_cm_tx_destroy() local
1173 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", in ipoib_cm_tx_destroy()
1184 ipoib_warn(priv, "timing out; %d sends not completed\n", in ipoib_cm_tx_destroy()
1197 ipoib_dma_unmap_tx(priv, tx_req); in ipoib_cm_tx_destroy()
1201 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && in ipoib_cm_tx_destroy()
1203 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) in ipoib_cm_tx_destroy()
1219 struct ipoib_dev_priv *priv = netdev_priv(tx->dev); in ipoib_cm_tx_handler() local
1220 struct net_device *dev = priv->dev; in ipoib_cm_tx_handler()
1227 ipoib_dbg(priv, "DREQ received.\n"); in ipoib_cm_tx_handler()
1231 ipoib_dbg(priv, "REP received.\n"); in ipoib_cm_tx_handler()
1240 ipoib_dbg(priv, "CM error %d.\n", event->event); in ipoib_cm_tx_handler()
1242 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_tx_handler()
1253 list_move(&tx->list, &priv->cm.reap_list); in ipoib_cm_tx_handler()
1254 queue_work(priv->wq, &priv->cm.reap_task); in ipoib_cm_tx_handler()
1257 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_tx_handler()
1270 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_create_tx() local
1281 list_add(&tx->list, &priv->cm.start_list); in ipoib_cm_create_tx()
1283 queue_work(priv->wq, &priv->cm.start_task); in ipoib_cm_create_tx()
1289 struct ipoib_dev_priv *priv = netdev_priv(tx->dev); in ipoib_cm_destroy_tx() local
1292 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_destroy_tx()
1293 list_move(&tx->list, &priv->cm.reap_list); in ipoib_cm_destroy_tx()
1294 queue_work(priv->wq, &priv->cm.reap_task); in ipoib_cm_destroy_tx()
1295 ipoib_dbg(priv, "Reap connection for gid %pI6\n", in ipoib_cm_destroy_tx()
1298 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_destroy_tx()
1304 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, in ipoib_cm_tx_start() local
1306 struct net_device *dev = priv->dev; in ipoib_cm_tx_start()
1316 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_tx_start()
1318 while (!list_empty(&priv->cm.start_list)) { in ipoib_cm_tx_start()
1319 p = list_entry(priv->cm.start_list.next, typeof(*p), list); in ipoib_cm_tx_start()
1325 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_tx_start()
1331 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_tx_start()
1344 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_tx_start()
1350 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, in ipoib_cm_tx_reap() local
1352 struct net_device *dev = priv->dev; in ipoib_cm_tx_reap()
1357 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_tx_reap()
1359 while (!list_empty(&priv->cm.reap_list)) { in ipoib_cm_tx_reap()
1360 p = list_entry(priv->cm.reap_list.next, typeof(*p), list); in ipoib_cm_tx_reap()
1362 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_tx_reap()
1366 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_tx_reap()
1369 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_tx_reap()
1375 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, in ipoib_cm_skb_reap() local
1377 struct net_device *dev = priv->dev; in ipoib_cm_skb_reap()
1380 unsigned mtu = priv->mcast_mtu; in ipoib_cm_skb_reap()
1383 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_skb_reap()
1385 while ((skb = skb_dequeue(&priv->cm.skb_queue))) { in ipoib_cm_skb_reap()
1386 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_skb_reap()
1398 spin_lock_irqsave(&priv->lock, flags); in ipoib_cm_skb_reap()
1401 spin_unlock_irqrestore(&priv->lock, flags); in ipoib_cm_skb_reap()
1408 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_skb_too_long() local
1409 int e = skb_queue_empty(&priv->cm.skb_queue); in ipoib_cm_skb_too_long()
1414 skb_queue_tail(&priv->cm.skb_queue, skb); in ipoib_cm_skb_too_long()
1416 queue_work(priv->wq, &priv->cm.skb_task); in ipoib_cm_skb_too_long()
1427 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, in ipoib_cm_stale_task() local
1432 spin_lock_irq(&priv->lock); in ipoib_cm_stale_task()
1433 while (!list_empty(&priv->cm.passive_ids)) { in ipoib_cm_stale_task()
1436 p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list); in ipoib_cm_stale_task()
1439 list_move(&p->list, &priv->cm.rx_error_list); in ipoib_cm_stale_task()
1441 spin_unlock_irq(&priv->lock); in ipoib_cm_stale_task()
1444 ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); in ipoib_cm_stale_task()
1445 spin_lock_irq(&priv->lock); in ipoib_cm_stale_task()
1448 if (!list_empty(&priv->cm.passive_ids)) in ipoib_cm_stale_task()
1449 queue_delayed_work(priv->wq, in ipoib_cm_stale_task()
1450 &priv->cm.stale_task, IPOIB_CM_RX_DELAY); in ipoib_cm_stale_task()
1451 spin_unlock_irq(&priv->lock); in ipoib_cm_stale_task()
1457 struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d)); in show_mode() local
1459 if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) in show_mode()
1493 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_create_srq() local
1502 priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr); in ipoib_cm_create_srq()
1503 if (IS_ERR(priv->cm.srq)) { in ipoib_cm_create_srq()
1504 if (PTR_ERR(priv->cm.srq) != -ENOSYS) in ipoib_cm_create_srq()
1506 priv->ca->name, PTR_ERR(priv->cm.srq)); in ipoib_cm_create_srq()
1507 priv->cm.srq = NULL; in ipoib_cm_create_srq()
1511 priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring); in ipoib_cm_create_srq()
1512 if (!priv->cm.srq_ring) { in ipoib_cm_create_srq()
1514 priv->ca->name, ipoib_recvq_size); in ipoib_cm_create_srq()
1515 ib_destroy_srq(priv->cm.srq); in ipoib_cm_create_srq()
1516 priv->cm.srq = NULL; in ipoib_cm_create_srq()
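
ipoib_cm_create_srq() treats a device without SRQ support as a soft failure: when ib_create_srq() returns -ENOSYS the driver simply leaves priv->cm.srq NULL and runs connected mode with per-connection receive rings instead (the nonsrq paths above). A sketch of that create-or-fall-back step, with my_create_srq() as an illustrative wrapper:

    #include <linux/err.h>
    #include <linux/printk.h>
    #include <rdma/ib_verbs.h>

    static struct ib_srq *my_create_srq(struct ib_pd *pd, int recvq_size,
                                        int max_sge)
    {
        struct ib_srq_init_attr srq_init_attr = {
            .attr = {
                .max_wr  = recvq_size,      /* one WR per receive ring slot */
                .max_sge = max_sge,
            },
        };
        struct ib_srq *srq;

        srq = ib_create_srq(pd, &srq_init_attr);
        if (IS_ERR(srq)) {
            if (PTR_ERR(srq) != -ENOSYS)
                pr_warn("ib_create_srq failed: %ld\n", PTR_ERR(srq));
            return NULL;                    /* fall back to non-SRQ mode */
        }
        return srq;
    }
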
1524 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_dev_init() local
1528 INIT_LIST_HEAD(&priv->cm.passive_ids); in ipoib_cm_dev_init()
1529 INIT_LIST_HEAD(&priv->cm.reap_list); in ipoib_cm_dev_init()
1530 INIT_LIST_HEAD(&priv->cm.start_list); in ipoib_cm_dev_init()
1531 INIT_LIST_HEAD(&priv->cm.rx_error_list); in ipoib_cm_dev_init()
1532 INIT_LIST_HEAD(&priv->cm.rx_flush_list); in ipoib_cm_dev_init()
1533 INIT_LIST_HEAD(&priv->cm.rx_drain_list); in ipoib_cm_dev_init()
1534 INIT_LIST_HEAD(&priv->cm.rx_reap_list); in ipoib_cm_dev_init()
1535 INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start); in ipoib_cm_dev_init()
1536 INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap); in ipoib_cm_dev_init()
1537 INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap); in ipoib_cm_dev_init()
1538 INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap); in ipoib_cm_dev_init()
1539 INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task); in ipoib_cm_dev_init()
1541 skb_queue_head_init(&priv->cm.skb_queue); in ipoib_cm_dev_init()
1543 ret = ib_query_device(priv->ca, &attr); in ipoib_cm_dev_init()
1549 ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge); in ipoib_cm_dev_init()
1554 priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10; in ipoib_cm_dev_init()
1555 priv->cm.num_frags = attr.max_srq_sge; in ipoib_cm_dev_init()
1556 ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n", in ipoib_cm_dev_init()
1557 priv->cm.max_cm_mtu, priv->cm.num_frags); in ipoib_cm_dev_init()
1559 priv->cm.max_cm_mtu = IPOIB_CM_MTU; in ipoib_cm_dev_init()
1560 priv->cm.num_frags = IPOIB_CM_RX_SG; in ipoib_cm_dev_init()
1563 ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge); in ipoib_cm_dev_init()
1567 if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i, in ipoib_cm_dev_init()
1568 priv->cm.num_frags - 1, in ipoib_cm_dev_init()
1569 priv->cm.srq_ring[i].mapping, in ipoib_cm_dev_init()
1571 ipoib_warn(priv, "failed to allocate " in ipoib_cm_dev_init()
1578 ipoib_warn(priv, "ipoib_cm_post_receive_srq " in ipoib_cm_dev_init()
1586 priv->dev->dev_addr[0] = IPOIB_FLAGS_RC; in ipoib_cm_dev_init()
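
The sizing lines in ipoib_cm_dev_init() (1549-1560 above) derive the connected-mode MTU from the device's max_srq_sge: with an SRQ the scatter list is clamped to the default size and the MTU becomes that many pages minus a small reserve (the 0x10 in the listing); without an SRQ the compile-time defaults are kept. A hedged sketch of that computation, with MY_CM_RX_SG and MY_CM_MTU standing in for IPOIB_CM_RX_SG and IPOIB_CM_MTU and the has_srq split reconstructed from the surrounding code:

    #include <linux/kernel.h>
    #include <linux/mm.h>

    #define MY_CM_RX_SG 16              /* stand-in for IPOIB_CM_RX_SG */
    #define MY_CM_MTU   0xffff          /* stand-in for IPOIB_CM_MTU   */

    static void my_size_cm_mtu(bool has_srq, int max_srq_sge,
                               unsigned int *max_cm_mtu, int *num_frags)
    {
        if (has_srq) {
            /* never ask for more SGEs than the default scatter list */
            max_srq_sge = min_t(int, MY_CM_RX_SG, max_srq_sge);
            *max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
            *num_frags  = max_srq_sge;
        } else {
            *max_cm_mtu = MY_CM_MTU;
            *num_frags  = MY_CM_RX_SG;
        }
    }
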
1592 struct ipoib_dev_priv *priv = netdev_priv(dev); in ipoib_cm_dev_cleanup() local
1595 if (!priv->cm.srq) in ipoib_cm_dev_cleanup()
1598 ipoib_dbg(priv, "Cleanup ipoib connected mode.\n"); in ipoib_cm_dev_cleanup()
1600 ret = ib_destroy_srq(priv->cm.srq); in ipoib_cm_dev_cleanup()
1602 ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret); in ipoib_cm_dev_cleanup()
1604 priv->cm.srq = NULL; in ipoib_cm_dev_cleanup()
1605 if (!priv->cm.srq_ring) in ipoib_cm_dev_cleanup()
1608 ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring); in ipoib_cm_dev_cleanup()
1609 priv->cm.srq_ring = NULL; in ipoib_cm_dev_cleanup()