This source file includes the following definitions:
- ipoib_create_ah
- ipoib_free_ah
- ipoib_ud_dma_unmap_rx
- ipoib_ib_post_receive
- ipoib_alloc_rx_skb
- ipoib_ib_post_receives
- ipoib_ib_handle_rx_wc
- ipoib_dma_map_tx
- ipoib_dma_unmap_tx
- ipoib_qp_state_validate_work
- ipoib_ib_handle_tx_wc
- poll_tx
- ipoib_rx_poll
- ipoib_tx_poll
- ipoib_ib_rx_completion
- ipoib_ib_tx_completion
- post_send
- ipoib_send
- __ipoib_reap_ah
- ipoib_reap_ah
- ipoib_flush_ah
- ipoib_stop_ah
- recvs_pending
- check_qp_movement_and_print
- ipoib_napi_enable
- ipoib_napi_disable
- ipoib_ib_dev_stop_default
- ipoib_ib_dev_stop
- ipoib_ib_dev_open_default
- ipoib_ib_dev_open
- ipoib_pkey_dev_check_presence
- ipoib_ib_dev_up
- ipoib_ib_dev_down
- ipoib_drain_cq
- update_parent_pkey
- update_child_pkey
- ipoib_dev_addr_changed_valid
- __ipoib_ib_dev_flush
- ipoib_ib_dev_flush_light
- ipoib_ib_dev_flush_normal
- ipoib_ib_dev_flush_heavy
- ipoib_ib_dev_cleanup
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <rdma/ib_cache.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
                 "Enable data path debug tracing if > 0");
#endif

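/*
 * Allocate an ipoib_ah that wraps an rdma address handle together with a
 * reference count and the send-queue position of its last use, so that the
 * handle is only destroyed once every posted send that uses it has
 * completed.
 */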
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct rdma_ah_attr *attr)
{
        struct ipoib_ah *ah;
        struct ib_ah *vah;

        ah = kmalloc(sizeof(*ah), GFP_KERNEL);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        ah->dev       = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);

        vah = rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
        if (IS_ERR(vah)) {
                kfree(ah);
                /* hand the ERR_PTR back to the caller through the wrapper type */
                ah = (struct ipoib_ah *)vah;
        } else {
                ah->ah = vah;
                ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
        }

        return ah;
}

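/*
 * kref release function.  The handle is not destroyed immediately, since
 * posted sends may still reference it; it is moved to the dead_ahs list
 * and freed by the periodic reaper once priv->tx_tail has passed its
 * last_send position.
 */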
void ipoib_free_ah(struct kref *kref)
{
        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);

        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        list_add_tail(&ah->list, &priv->dead_ahs);
        spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
                                  u64 mapping[IPOIB_UD_RX_SG])
{
        ib_dma_unmap_single(priv->ca, mapping[0],
                            IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
                            DMA_FROM_DEVICE);
}

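/*
 * Post the receive buffer in ring slot 'id' back to the QP.  The ring
 * index is encoded in the work request ID (tagged with IPOIB_OP_RECV) so
 * the completion handler can find the buffer again.
 */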
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int ret;

        priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
        priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
        priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

        ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}

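/*
 * Allocate and DMA-map a fresh receive skb for ring slot 'id'.  Headroom
 * is reserved for the pseudo header that ipoib pushes in front of the
 * payload on receive.
 */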
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct sk_buff *skb;
        int buf_size;
        u64 *mapping;

        buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

        skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, sizeof(struct ipoib_pseudo_header));

        mapping = priv->rx_ring[id].mapping;
        mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
                goto error;

        priv->rx_ring[id].skb = skb;
        return skb;
error:
        dev_kfree_skb_any(skb);
        return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_alloc_rx_skb(dev, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
                if (ipoib_ib_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
                        return -EIO;
                }
        }

        return 0;
}

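/*
 * Handle one receive completion: validate the ring index, replenish the
 * ring slot with a fresh skb, classify the packet (host/broadcast/
 * multicast) from the GRH dgid, drop our own multicast echoes, and hand
 * the skb to GRO.  The ring slot is reposted on all paths.
 */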
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
        struct sk_buff *skb;
        u64 mapping[IPOIB_UD_RX_SG];
        union ib_gid *dgid;
        union ib_gid *sgid;

        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_recvq_size);
                return;
        }

        skb = priv->rx_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv,
                                   "failed recv event (status=%d, wrid=%d vend_err %#x)\n",
                                   wc->status, wr_id, wc->vendor_err);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
                dev_kfree_skb_any(skb);
                priv->rx_ring[wr_id].skb = NULL;
                return;
        }

        memcpy(mapping, priv->rx_ring[wr_id].mapping,
               IPOIB_UD_RX_SG * sizeof(*mapping));

        /*
         * If we can't allocate a new RX buffer, dump
         * this packet and reuse the old buffer.
         */
        if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
                ++dev->stats.rx_dropped;
                goto repost;
        }

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        ipoib_ud_dma_unmap_rx(priv, mapping);

        skb_put(skb, wc->byte_len);

        /* First byte of dgid signals multicast when 0xff */
        dgid = &((struct ib_grh *)skb->data)->dgid;

        if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
                skb->pkt_type = PACKET_HOST;
        else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
                skb->pkt_type = PACKET_BROADCAST;
        else
                skb->pkt_type = PACKET_MULTICAST;

        sgid = &((struct ib_grh *)skb->data)->sgid;

        /*
         * Drop packets that this interface sent, ie multicast packets
         * that the HCA has replicated.  A packet carrying our LID and
         * QPN is still delivered if its source GID belongs to a
         * different interface sharing the LID.
         */
        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
                int need_repost = 1;

                if ((wc->wc_flags & IB_WC_GRH) &&
                    sgid->global.interface_id != priv->local_gid.global.interface_id)
                        need_repost = 0;

                if (need_repost) {
                        dev_kfree_skb_any(skb);
                        goto repost;
                }
        }

        skb_pull(skb, IB_GRH_BYTES);

        skb->protocol = ((struct ipoib_header *)skb->data)->proto;
        skb_add_pseudo_hdr(skb);

        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;
        if (skb->pkt_type == PACKET_MULTICAST)
                dev->stats.multicast++;

        skb->dev = dev;
        if ((dev->features & NETIF_F_RXCSUM) &&
            likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        napi_gro_receive(&priv->recv_napi, skb);

repost:
        if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
                ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n",
                           wr_id);
}

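/*
 * DMA-map a send skb: the linear head (if any) into mapping[0] and each
 * page fragment after it.  On a partial failure everything mapped so far
 * is unwound before returning -EIO.
 */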
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
                                               DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
                        return -EIO;

                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                mapping[i + off] = ib_dma_map_page(ca,
                                                   skb_frag_page(frag),
                                                   skb_frag_off(frag),
                                                   skb_frag_size(frag),
                                                   DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
                        goto partial_error;
        }
        return 0;

partial_error:
        for (; i > 0; --i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                ib_dma_unmap_page(ca, mapping[i - !off],
                                  skb_frag_size(frag), DMA_TO_DEVICE);
        }

        if (off)
                ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb),
                                    DMA_TO_DEVICE);

        return -EIO;
}

void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
                        struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
                                    DMA_TO_DEVICE);
                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                ib_dma_unmap_page(priv->ca, mapping[i + off],
                                  skb_frag_size(frag), DMA_TO_DEVICE);
        }
}

/*
 * As a result of a completion error the QP can be transferred to the SQE
 * state.  This work function queries the QP and, if it is found in SQE,
 * moves it back to RTS so that sending can resume.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
        struct ipoib_qp_state_validate *qp_work =
                container_of(work, struct ipoib_qp_state_validate, work);

        struct ipoib_dev_priv *priv = qp_work->priv;
        struct ib_qp_attr qp_attr;
        struct ib_qp_init_attr query_init_attr;
        int ret;

        ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
        if (ret) {
                ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
                           __func__, ret);
                goto free_res;
        }
        pr_info("%s: QP: 0x%x is in state: %d\n",
                __func__, priv->qp->qp_num, qp_attr.qp_state);

        /* currently the only recovery supported is SQE -> RTS */
        if (qp_attr.qp_state == IB_QPS_SQE) {
                qp_attr.qp_state = IB_QPS_RTS;

                ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
                if (ret) {
                        pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
                                ret, priv->qp->qp_num);
                        goto free_res;
                }
                pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
                        __func__, priv->qp->qp_num);
        } else {
                pr_warn("QP (%d) will stay in state: %d\n",
                        priv->qp->qp_num, qp_attr.qp_state);
        }

free_res:
        kfree(qp_work);
}

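/*
 * Handle one send completion: unmap and free the skb, advance the TX tail
 * counters, wake the netdev queue once the ring has drained to half full,
 * and on a real error (not a flush) schedule the QP state check above.
 */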
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;

        ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &priv->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv, tx_req);

        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        ++priv->tx_tail;
        ++priv->global_tx_tail;

        if (unlikely(netif_queue_stopped(dev) &&
                     ((priv->global_tx_head - priv->global_tx_tail) <=
                      ipoib_sendq_size >> 1) &&
                     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
                netif_wake_queue(dev);

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_qp_state_validate *qp_work;

                ipoib_warn(priv,
                           "failed send event (status=%d, wrid=%d vend_err %#x)\n",
                           wc->status, wr_id, wc->vendor_err);
                qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
                if (!qp_work)
                        return;

                INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
                qp_work->priv = priv;
                queue_work(priv->wq, &qp_work->work);
        }
}

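/*
 * Drain up to MAX_SEND_CQE completions from the send CQ outside of NAPI
 * (used from ipoib_drain_cq()).  Returns nonzero while the CQ may still
 * hold more completions.
 */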
static int poll_tx(struct ipoib_dev_priv *priv)
{
        int n, i;
        struct ib_wc *wc;

        n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
        for (i = 0; i < n; ++i) {
                wc = priv->send_wc + i;
                if (wc->wr_id & IPOIB_OP_CM)
                        ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
                else
                        ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
        }
        return n == MAX_SEND_CQE;
}

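/*
 * NAPI poll handler for the receive CQ.  Completions are polled in
 * batches of up to IPOIB_NUM_WC; when the budget is not exhausted the CQ
 * is re-armed, and polling resumes if completions raced in meanwhile.
 */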
int ipoib_rx_poll(struct napi_struct *napi, int budget)
{
        struct ipoib_dev_priv *priv =
                container_of(napi, struct ipoib_dev_priv, recv_napi);
        struct net_device *dev = priv->dev;
        int done;
        int t;
        int n, i;

        done = 0;

poll_more:
        while (done < budget) {
                int max = (budget - done);

                t = min(IPOIB_NUM_WC, max);
                n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = priv->ibwc + i;

                        if (wc->wr_id & IPOIB_OP_RECV) {
                                ++done;
                                if (wc->wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, wc);
                                else
                                        ipoib_ib_handle_rx_wc(dev, wc);
                        } else {
                                pr_warn("%s: Got unexpected wqe id\n", __func__);
                        }
                }

                if (n != t)
                        break;
        }

        if (done < budget) {
                napi_complete(napi);
                if (unlikely(ib_req_notify_cq(priv->recv_cq,
                                              IB_CQ_NEXT_COMP |
                                              IB_CQ_REPORT_MISSED_EVENTS)) &&
                    napi_reschedule(napi))
                        goto poll_more;
        }

        return done;
}

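/*
 * NAPI poll handler for the send CQ: the same re-arm-and-recheck dance as
 * the receive side, returning the number of completions processed.
 */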
int ipoib_tx_poll(struct napi_struct *napi, int budget)
{
        struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
                                                   send_napi);
        struct net_device *dev = priv->dev;
        int n, i;
        struct ib_wc *wc;

poll_more:
        n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);

        for (i = 0; i < n; i++) {
                wc = priv->send_wc + i;
                if (wc->wr_id & IPOIB_OP_CM)
                        ipoib_cm_handle_tx_wc(dev, wc);
                else
                        ipoib_ib_handle_tx_wc(dev, wc);
        }

        if (n < budget) {
                napi_complete(napi);
                if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
                                              IB_CQ_REPORT_MISSED_EVENTS)) &&
                    napi_reschedule(napi))
                        goto poll_more;
        }
        return n < 0 ? 0 : n;
}

void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
{
        struct ipoib_dev_priv *priv = ctx_ptr;

        napi_schedule(&priv->recv_napi);
}

void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
{
        struct ipoib_dev_priv *priv = ctx_ptr;

        napi_schedule(&priv->send_napi);
}

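/*
 * Build and post one UD send work request.  A non-NULL 'head' selects an
 * LSO work request carrying the given TCP/IP header; otherwise a plain
 * send is posted.
 */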
static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 dqpn,
                            struct ipoib_tx_buf *tx_req,
                            void *head, int hlen)
{
        struct sk_buff *skb = tx_req->skb;

        ipoib_build_sge(priv, tx_req);

        priv->tx_wr.wr.wr_id   = wr_id;
        priv->tx_wr.remote_qpn = dqpn;
        priv->tx_wr.ah         = address;

        if (head) {
                priv->tx_wr.mss       = skb_shinfo(skb)->gso_size;
                priv->tx_wr.header    = head;
                priv->tx_wr.hlen      = hlen;
                priv->tx_wr.wr.opcode = IB_WR_LSO;
        } else
                priv->tx_wr.wr.opcode = IB_WR_SEND;

        return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
}

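/*
 * Transmit one skb on the UD QP.  GSO skbs have their headers pulled out
 * for LSO; oversized non-GSO skbs are dropped and handed to
 * ipoib_cm_skb_too_long(); skbs with too many fragments are linearized to
 * fit the device's SGE limit.
 */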
int ipoib_send(struct net_device *dev, struct sk_buff *skb,
               struct ib_ah *address, u32 dqpn)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int hlen, rc;
        void *phead;
        unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);

        if (skb_is_gso(skb)) {
                hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
                phead = skb->data;
                if (unlikely(!skb_pull(skb, hlen))) {
                        ipoib_warn(priv, "linear data too small\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return -1;
                }
        } else {
                if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                        ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                                   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
                        return -1;
                }
                phead = NULL;
                hlen = 0;
        }
        if (skb_shinfo(skb)->nr_frags > usable_sge) {
                if (skb_linearize(skb) < 0) {
                        ipoib_warn(priv, "skb could not be linearized\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return -1;
                }
                /* Does skb_linearize return ok without reducing nr_frags? */
                if (skb_shinfo(skb)->nr_frags > usable_sge) {
                        ipoib_warn(priv, "too many frags after skb linearize\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return -1;
                }
        }

        ipoib_dbg_data(priv,
                       "sending packet, length=%d address=%p dqpn=0x%06x\n",
                       skb->len, address, dqpn);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  So we
         * need to make sure everything is properly recorded and our
         * state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return -1;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
        else
                priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

        /* stop the queue while the ring is one entry short of full */
        if ((priv->global_tx_head - priv->global_tx_tail) ==
            ipoib_sendq_size - 1) {
                ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                netif_stop_queue(dev);
        }

        skb_orphan(skb);
        skb_dst_drop(skb);

        if (netif_queue_stopped(dev))
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
                                     IB_CQ_REPORT_MISSED_EVENTS) < 0)
                        ipoib_warn(priv, "request notify on send CQ failed\n");

        rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                       address, dqpn, tx_req, phead, hlen);
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
                rc = 0;
        } else {
                netif_trans_update(dev);

                rc = priv->tx_head;
                ++priv->tx_head;
                ++priv->global_tx_head;
        }
        return rc;
}

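/*
 * Free all address handles on the dead_ahs list whose last use has
 * completed, i.e. whose last_send position is at or behind tx_tail.
 */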
static void __ipoib_reap_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ipoib_ah *ah, *tah;
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
                        list_del(&ah->list);
                        rdma_destroy_ah(ah->ah, 0);
                        kfree(ah);
                }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
        struct net_device *dev = priv->dev;

        __ipoib_reap_ah(dev);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(priv->wq, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
}

static void ipoib_flush_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        cancel_delayed_work(&priv->ah_reap_task);
        flush_workqueue(priv->wq);
        ipoib_reap_ah(&priv->ah_reap_task.work);
}

static void ipoib_stop_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        ipoib_flush_ah(dev);
}

static int recvs_pending(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int pending = 0;
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->rx_ring[i].skb)
                        ++pending;

        return pending;
}

static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
                                        struct ib_qp *qp,
                                        enum ib_qp_state new_state)
{
        struct ib_qp_attr qp_attr;
        struct ib_qp_init_attr query_init_attr;
        int ret;

        ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
        if (ret) {
                ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
                return;
        }

        /* print according to the new state and the previous state */
        if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
                ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
        else
                ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
                           new_state, qp_attr.qp_state);
}

static void ipoib_napi_enable(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        napi_enable(&priv->recv_napi);
        napi_enable(&priv->send_napi);
}

static void ipoib_napi_disable(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        napi_disable(&priv->recv_napi);
        napi_disable(&priv->send_napi);
}

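/*
 * Default stop path: move the QP to the error state so outstanding work
 * requests flush, wait up to five seconds for sends and receives to
 * drain, then force-free anything still pending and reset the QP.
 */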
int ipoib_ib_dev_stop_default(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct ib_qp_attr qp_attr;
        unsigned long begin;
        struct ipoib_tx_buf *tx_req;
        int i;

        if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                ipoib_napi_disable(dev);

        ipoib_cm_dev_stop(dev);

        /*
         * Move our QP to the error state and then reinitialize it once
         * all work requests have completed or have been flushed.
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);

        /* Wait for all sends and receives to complete */
        begin = jiffies;

        while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv,
                                   "timing out; %d sends %d receives not completed\n",
                                   priv->tx_head - priv->tx_tail,
                                   recvs_pending(dev));

                        /*
                         * assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
                                ipoib_dma_unmap_tx(priv, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                                ++priv->global_tx_tail;
                        }

                        for (i = 0; i < ipoib_recvq_size; ++i) {
                                struct ipoib_rx_buf *rx_req;

                                rx_req = &priv->rx_ring[i];
                                if (!rx_req->skb)
                                        continue;
                                ipoib_ud_dma_unmap_rx(priv,
                                                      priv->rx_ring[i].mapping);
                                dev_kfree_skb_any(rx_req->skb);
                                rx_req->skb = NULL;
                        }

                        goto timeout;
                }

                ipoib_drain_cq(dev);

                usleep_range(1000, 2000);
        }

        ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

        return 0;
}

int ipoib_ib_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        priv->rn_ops->ndo_stop(dev);

        clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
        ipoib_flush_ah(dev);

        return 0;
}

int ipoib_ib_dev_open_default(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int ret;

        ret = ipoib_init_qp(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
                return -1;
        }

        ret = ipoib_ib_post_receives(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                goto out;
        }

        ret = ipoib_cm_dev_open(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
                goto out;
        }

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                ipoib_napi_enable(dev);

        return 0;
out:
        return -1;
}

int ipoib_ib_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
                           (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
                return -1;
        }

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(priv->wq, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));

        if (priv->rn_ops->ndo_open(dev)) {
                pr_warn("%s: Failed to open dev\n", dev->name);
                goto dev_stop;
        }

        set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

        return 0;

dev_stop:
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
        set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
        ipoib_ib_dev_stop(dev);
        return -1;
}

void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct rdma_netdev *rn = netdev_priv(dev);

        if (!(priv->pkey & 0x7fff) ||
            ib_find_pkey(priv->ca, priv->port, priv->pkey,
                         &priv->pkey_index)) {
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        } else {
                if (rn->set_id)
                        rn->set_id(dev, priv->pkey_index);
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        }
}

void ipoib_ib_dev_up(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_dbg(priv, "PKEY is not assigned.\n");
                return;
        }

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        ipoib_mcast_start_thread(dev);
}

void ipoib_ib_dev_down(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);

        ipoib_mcast_stop_thread(dev);
        ipoib_mcast_dev_flush(dev);

        ipoib_flush_paths(dev);
}

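/*
 * Synchronously drain both CQs while the device is being stopped.  The
 * completion handlers normally run in NAPI context with BHs disabled, so
 * BHs are disabled here as well before calling them directly.
 */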
void ipoib_drain_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        int i, n;

        local_bh_disable();

        do {
                n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
                        /*
                         * Convert any successful completions to flush
                         * errors to avoid passing packets up the stack
                         * after the device has been stopped.
                         */
                        if (priv->ibwc[i].status == IB_WC_SUCCESS)
                                priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

                        if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
                                if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
                                else
                                        ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
                        } else {
                                pr_warn("%s: Got unexpected wqe id\n", __func__);
                        }
                }
        } while (n == IPOIB_NUM_WC);

        while (poll_tx(priv))
                ; /* nothing */

        local_bh_enable();
}

/*
 * Take whatever value is in P_Key index 0 and update priv->pkey.
 * Returns 0 if the value was changed, 1 if it is unchanged, or a
 * negative error code if the query failed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
        int result;
        u16 prev_pkey;

        prev_pkey = priv->pkey;
        result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
        if (result) {
                ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
                           priv->port, result);
                return result;
        }

        /* set the full membership bit */
        priv->pkey |= 0x8000;

        if (prev_pkey != priv->pkey) {
                ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
                          prev_pkey, priv->pkey);
                /*
                 * Update the pkey in the broadcast address as well, so
                 * that we join the right broadcast group.
                 */
                priv->dev->broadcast[8] = priv->pkey >> 8;
                priv->dev->broadcast[9] = priv->pkey & 0xff;
                return 0;
        }

        return 1;
}

/*
 * Re-resolve the P_Key index for a child interface; returns 1 if the
 * index is still present and unchanged, 0 otherwise.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
        u16 old_index = priv->pkey_index;

        priv->pkey_index = 0;
        ipoib_pkey_dev_check_presence(priv->dev);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
            (old_index == priv->pkey_index))
                return 1;
        return 0;
}

/*
 * Returns true if the device address of the ipoib interface has changed
 * and the new address is a valid one (i.e. it is present in the GID
 * table), false otherwise.
 */
static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
        union ib_gid search_gid;
        union ib_gid gid0;
        union ib_gid *netdev_gid;
        int err;
        u16 index;
        u8 port;
        bool ret = false;

        netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
        if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
                return false;

        netif_addr_lock_bh(priv->dev);

        /*
         * The subnet prefix may have changed, update it now so we won't
         * have to do it later.
         */
        priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
        netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
        search_gid.global.subnet_prefix = gid0.global.subnet_prefix;

        search_gid.global.interface_id = priv->local_gid.global.interface_id;

        netif_addr_unlock_bh(priv->dev);

        err = ib_find_gid(priv->ca, &search_gid, &port, &index);

        netif_addr_lock_bh(priv->dev);

        if (search_gid.global.interface_id !=
            priv->local_gid.global.interface_id)
                /* There was a change while we were looking up the gid, bail
                 * here and retry later
                 */
                goto out;

        /*
         * Per the IB spec the port GUID (and hence the GID at index 0,
         * on which the ipoib hardware address is based) cannot change
         * while the HCA is powered on, but some devices do not follow
         * this.  The logic below therefore tracks whether the user
         * controls the device address (IPOIB_FLAG_DEV_ADDR_CTRL) and
         * whether a valid address has been established
         * (IPOIB_FLAG_DEV_ADDR_SET), and falls back to the current GID
         * at index 0 when the old GID has disappeared from the table.
         */
        if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
                if (!err && port == priv->port) {
                        set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
                        if (index == 0)
                                clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
                                          &priv->flags);
                        else
                                set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
                        ret = true;
                } else {
                        ret = false;
                }
        } else {
                if (!err && port == priv->port) {
                        ret = true;
                } else {
                        if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
                                memcpy(&priv->local_gid, &gid0,
                                       sizeof(priv->local_gid));
                                memcpy(priv->dev->dev_addr + 4, &gid0,
                                       sizeof(priv->local_gid));
                                ret = true;
                        }
                }
        }

out:
        netif_addr_unlock_bh(priv->dev);

        return ret;
}

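/*
 * Core of the light/normal/heavy flush paths.  Child interfaces are
 * flushed first, then the device is revalidated (P_Key, device address,
 * paths, multicast state) to the depth the flush level requires; a heavy
 * flush tears the device down and reopens it.
 */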
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
                                 enum ipoib_flush_level level,
                                 int nesting)
{
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
        int result;

        down_read_nested(&priv->vlan_rwsem, nesting);

        /*
         * Flush any child interfaces too -- they might be up even if
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                __ipoib_ib_dev_flush(cpriv, level, nesting + 1);

        up_read(&priv->vlan_rwsem);

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
            level != IPOIB_FLUSH_HEAVY) {
                /* Make sure the dev_addr is set even if not flushing */
                if (level == IPOIB_FLUSH_LIGHT)
                        ipoib_dev_addr_changed_valid(priv);
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
                return;
        }

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                /* interface is down - update pkey and leave */
                if (level == IPOIB_FLUSH_HEAVY) {
                        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
                                update_parent_pkey(priv);
                        else
                                update_child_pkey(priv);
                } else if (level == IPOIB_FLUSH_LIGHT)
                        ipoib_dev_addr_changed_valid(priv);
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
                return;
        }

        if (level == IPOIB_FLUSH_HEAVY) {
                /* child devices chase their origin pkey value, while
                 * parent devices always take what is present in pkey
                 * index 0
                 */
                if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                        result = update_child_pkey(priv);
                        if (result) {
                                /* restart QP only if P_Key index changed */
                                ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
                                return;
                        }
                } else {
                        result = update_parent_pkey(priv);
                        /* restart QP only if P_Key value changed */
                        if (result) {
                                ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
                                return;
                        }
                }
        }

        if (level == IPOIB_FLUSH_LIGHT) {
                int oper_up;

                ipoib_mark_paths_invalid(dev);
                /*
                 * Clear OPER_UP around the multicast flush so that no
                 * new joins are started while the old groups are being
                 * removed, then restore it.
                 */
                oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_mcast_dev_flush(dev);
                if (oper_up)
                        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_flush_ah(dev);
        }

        if (level >= IPOIB_FLUSH_NORMAL)
                ipoib_ib_dev_down(dev);

        if (level == IPOIB_FLUSH_HEAVY) {
                if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                        ipoib_ib_dev_stop(dev);

                if (ipoib_ib_dev_open(dev))
                        return;

                if (netif_queue_stopped(dev))
                        netif_start_queue(dev);
        }

        /*
         * The device could have been brought down between the start and
         * when we get here, don't bring it back up if it's not
         * configured up
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                if (level >= IPOIB_FLUSH_NORMAL)
                        ipoib_ib_dev_up(dev);
                if (ipoib_dev_addr_changed_valid(priv))
                        ipoib_mcast_restart_task(&priv->restart_task);
        }
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_light);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_normal);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_heavy);

        rtnl_lock();
        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
        rtnl_unlock();
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = ipoib_priv(dev);

        ipoib_dbg(priv, "cleaning up ib_dev\n");

        /*
         * We must make sure there are no more (path) completions
         * that may wish to touch priv fields that are no longer valid.
         */
        ipoib_flush_paths(dev);

        ipoib_mcast_stop_thread(dev);
        ipoib_mcast_dev_flush(dev);

        /*
         * All of our ah references aren't free until after
         * ipoib_mcast_dev_flush(), ipoib_flush_paths, and the neighbor
         * garbage collection is stopped and reaped.  That should all be
         * done now, so make a final ah flush.
         */
        ipoib_stop_ah(dev);

        clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

        priv->rn_ops->ndo_uninit(dev);

        if (priv->pd) {
                ib_dealloc_pd(priv->pd);
                priv->pd = NULL;
        }
}