This source file includes the following definitions (an illustrative usage sketch follows the list):
- iwcm_reject_msg
- get_work
- put_work
- dealloc_work_entries
- alloc_work_entries
- copy_private_data
- free_cm_id
- iwcm_deref_id
- add_ref
- rem_ref
- iw_create_cm_id
- iwcm_modify_qp_err
- iwcm_modify_qp_sqd
- iw_cm_disconnect
- destroy_cm_id
- iw_destroy_cm_id
- iw_cm_check_wildcard
- iw_cm_map
- iw_cm_listen
- iw_cm_reject
- iw_cm_accept
- iw_cm_connect
- cm_conn_req_handler
- cm_conn_est_handler
- cm_conn_rep_handler
- cm_disconnect_handler
- cm_close_handler
- process_event
- cm_work_handler
- cm_event_handler
- iwcm_init_qp_init_attr
- iwcm_init_qp_rts_attr
- iw_cm_init_qp_attr
- iw_cm_init
- iw_cm_cleanup
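
Before the listing itself, a brief orientation: the exported entry points above form the iWARP connection-manager API consumed by RDMA upper-layer protocols. The fragment below is an illustrative sketch, not part of iwcm.c, showing how a hypothetical consumer might create a cm_id and listen for connection requests. The names example_cm_handler and example_listen, the device pointer, the refuse-everything policy, and the address setup are assumptions made for the example only.

#include <linux/err.h>
#include <linux/errno.h>
#include <rdma/iw_cm.h>

/* Hypothetical event handler: refuse every incoming connection request. */
static int example_cm_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
{
	if (event->event == IW_CM_EVENT_CONNECT_REQUEST)
		return -ECONNREFUSED;	/* non-zero: the core rejects and destroys the new id */
	return 0;
}

/* Hypothetical helper: create a cm_id bound to @device and listen on @addr. */
static struct iw_cm_id *example_listen(struct ib_device *device,
				       struct sockaddr_storage *addr)
{
	struct iw_cm_id *cm_id;
	int ret;

	cm_id = iw_create_cm_id(device, example_cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	cm_id->local_addr = *addr;	/* address to listen on */
	ret = iw_cm_listen(cm_id, 0);	/* 0 selects the default_backlog sysctl value */
	if (ret) {
		iw_destroy_cm_id(cm_id);
		return ERR_PTR(ret);
	}
	return cm_id;
}

The source listing follows.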
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>
#include <rdma/iw_portmap.h>
#include <rdma/rdma_netlink.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const iwcm_rej_reason_strs[] = {
	[ECONNRESET]	= "reset by remote host",
	[ECONNREFUSED]	= "refused by remote application",
	[ETIMEDOUT]	= "setup timeout",
};

const char *__attribute_const__ iwcm_reject_msg(int reason)
{
	size_t index;

	/* iWARP reject reasons are passed as negative errnos */
	index = -reason;

	if (index < ARRAY_SIZE(iwcm_rej_reason_strs) &&
	    iwcm_rej_reason_strs[index])
		return iwcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(iwcm_reject_msg);

static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb},
	[RDMA_NL_IWPM_HELLO] = {.dump = iwpm_hello_cb}
};

static struct workqueue_struct *iwcm_wq;

struct iwcm_work {
	struct work_struct	work;
	struct iwcm_id_private	*cm_id;
	struct list_head	list;
	struct iw_cm_event	event;
	struct list_head	free_list;
};

static unsigned int default_backlog = 256;

static struct ctl_table_header *iwcm_ctl_table_hdr;
static struct ctl_table iwcm_ctl_table[] = {
	{
		.procname	= "default_backlog",
		.data		= &default_backlog,
		.maxlen		= sizeof(default_backlog),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

/*
 * Work elements are pre-allocated per cm_id (see alloc_work_entries()) so
 * that connection events arriving in atomic context can always be queued
 * to iwcm_wq without having to allocate memory.
 */
static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
		list_del(e);
		kfree(list_entry(e, struct iwcm_work, free_list));
	}
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}

/*
 * Duplicate the event's private data so that it remains valid after the
 * provider's buffer goes away.  Called in atomic context, hence GFP_ATOMIC.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = kmemdup(event->private_data, event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	event->private_data = p;
	return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}

/*
 * Release a reference on cm_id_priv.  If this is the last reference the
 * cm_id is freed.  Returns 1 if the cm_id was freed, 0 otherwise.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
		return 1;
	}

	return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	(void)iwcm_deref_id(cm_id_priv);
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	atomic_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);


static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * iw_cm_disconnect - close or abort an established connection.
 *
 * If @abrupt is non-zero the QP is moved to the ERROR state, otherwise it
 * is moved to SQD and the connection is closed gracefully.
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* A QP may not be bound to this cm_id */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* The connection is already being closed */
	case IW_CM_STATE_IDLE:
		/* Nothing to do: not connected */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * Disconnect called before/without calling accept after
		 * receiving a connection request event: nothing to tear
		 * down yet.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if the wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * The result of the QP modify is intentionally discarded;
		 * the disconnect itself is reported as successful.
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release the
 * initial reference taken by iw_create_cm_id().
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	/*
	 * Since the cm_id is being deleted, drop any events that may still
	 * arrive before the last reference goes away.
	 */
	set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;

	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		cm_id->device->ops.iw_destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* abrupt close of the connection */
		(void)iwcm_modify_qp_err(qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * Destroy called before/without calling accept or reject
		 * after receiving a connection request event: reject the
		 * pending request.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_id->device->ops.iw_reject(cm_id, NULL, 0);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);

	if (cm_id->mapped) {
		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
	}

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * iw_destroy_cm_id - destroy a cm_id.
 *
 * Blocks until any outstanding connect or accept downcall has completed,
 * then releases all resources associated with the cm_id.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/*
 * If the port mapper returned a wildcard address (INADDR_ANY or the IPv6
 * any-address), propagate the real address from @cm_addr into @cm_outaddr.
 */
static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
				 struct sockaddr_storage *cm_addr,
				 struct sockaddr_storage *cm_outaddr)
{
	if (pm_addr->ss_family == AF_INET) {
		struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;

		if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
			struct sockaddr_in *cm4_addr =
				(struct sockaddr_in *)cm_addr;
			struct sockaddr_in *cm4_outaddr =
				(struct sockaddr_in *)cm_outaddr;

			cm4_outaddr->sin_addr = cm4_addr->sin_addr;
		}
	} else {
		struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;

		if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
			struct sockaddr_in6 *cm6_addr =
				(struct sockaddr_in6 *)cm_addr;
			struct sockaddr_in6 *cm6_outaddr =
				(struct sockaddr_in6 *)cm_outaddr;

			cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
		}
	}
}

/*
 * Register the cm_id with the iWARP port mapper.  If a user space port
 * mapper service is available, obtain mapped local (and, on the active
 * side, remote) addresses; otherwise fall back to the unmapped addresses.
 */
static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
{
	const char *devname = dev_name(&cm_id->device->dev);
	const char *ifname = cm_id->device->iw_ifname;
	struct iwpm_dev_data pm_reg_msg = {};
	struct iwpm_sa_data pm_msg;
	int status;

	if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) ||
	    strlen(ifname) >= sizeof(pm_reg_msg.if_name))
		return -EINVAL;

	cm_id->m_local_addr = cm_id->local_addr;
	cm_id->m_remote_addr = cm_id->remote_addr;

	strcpy(pm_reg_msg.dev_name, devname);
	strcpy(pm_reg_msg.if_name, ifname);

	if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
	    !iwpm_valid_pid())
		return 0;

	cm_id->mapped = true;
	pm_msg.loc_addr = cm_id->local_addr;
	pm_msg.rem_addr = cm_id->remote_addr;
	pm_msg.flags = (cm_id->device->iw_driver_flags & IW_F_NO_PORT_MAP) ?
		       IWPM_FLAGS_NO_PORT_MAP : 0;
	if (active)
		status = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_IWCM);
	else
		status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);

	if (!status) {
		cm_id->m_local_addr = pm_msg.mapped_loc_addr;
		if (active) {
			cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
			iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
					     &cm_id->remote_addr,
					     &cm_id->m_remote_addr);
		}
	}

	return iwpm_create_mapinfo(&cm_id->local_addr,
				   &cm_id->m_local_addr,
				   RDMA_NL_IWCM, pm_msg.flags);
}

/*
 * iw_cm_listen - start listening for connection requests on the given
 * cm_id.  A @backlog of zero selects the default_backlog sysctl value.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	if (!backlog)
		backlog = default_backlog;

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = iw_cm_map(cm_id, false);
		if (!ret)
			ret = cm_id->device->ops.iw_create_listen(cm_id,
								  backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);

/*
 * iw_cm_reject - reject a pending connection request.  Only valid in the
 * CONN_RECV state; moves the cm_id back to IDLE.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->ops.iw_reject(cm_id, private_data,
					   private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * iw_cm_accept - accept a pending connection request.  Binds the QP
 * identified by @iw_param->qpn to the cm_id and passes the accept down to
 * the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}

	qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id->device->ops.iw_add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->ops.iw_accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		qp = cm_id_priv->qp;
		cm_id_priv->qp = NULL;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		if (qp)
			cm_id->device->ops.iw_rem_ref(qp);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);

/*
 * iw_cm_connect - initiate an outgoing (active side) connection request.
 * Binds the QP identified by @iw_param->qpn to the cm_id and passes the
 * connect down to the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	unsigned long flags;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		ret = -EINVAL;
		goto err;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->ops.iw_get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		ret = -EINVAL;
		goto err;
	}
	cm_id->device->ops.iw_add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = iw_cm_map(cm_id, true);
	if (!ret)
		ret = cm_id->device->ops.iw_connect(cm_id, iw_param);
	if (!ret)
		return 0;	/* success */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;
	cm_id_priv->state = IW_CM_STATE_IDLE;
err:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id->device->ops.iw_rem_ref(qp);
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);
	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request.  A new cm_id is created to
 * represent the connection and inherits the client's handler and context
 * from the listening cm_id.  A non-zero return from the client handler
 * causes the new cm_id to be rejected and destroyed.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request event
	 * with a bad status.
	 */
	BUG_ON(iw_event->status);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->m_local_addr = iw_event->local_addr;
	cm_id->m_remote_addr = iw_event->remote_addr;
	cm_id->local_addr = listen_id_priv->id.local_addr;

	ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
				   &iw_event->remote_addr,
				   &cm_id->remote_addr,
				   RDMA_NL_IWCM);
	if (ret) {
		cm_id->remote_addr = iw_event->remote_addr;
	} else {
		iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
				     &iw_event->local_addr,
				     &cm_id->local_addr);
		iw_event->local_addr = cm_id->local_addr;
		iw_event->remote_addr = cm_id->remote_addr;
	}

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	/*
	 * The listening id could be getting destroyed or stopped; if so,
	 * drop this connection request.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
	}

out:
	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider reports that a connection previously accepted on this
 * cm_id is now established.  CONN_RECV --> ESTABLISHED.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback that calls
	 * iw_cm_disconnect() or iw_destroy_cm_id() will not block on
	 * connect_wait and deadlock.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The provider reports the result of a connect request issued by the
 * active side.  This event can only be received in the CONN_SENT state:
 * CONN_SENT --> ESTABLISHED on success, otherwise back to IDLE.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	struct ib_qp *qp = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback that calls
	 * iw_cm_disconnect() will not block on connect_wait and deadlock.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == 0) {
		cm_id_priv->id.m_local_addr = iw_event->local_addr;
		cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
		iw_event->local_addr = cm_id_priv->id.local_addr;
		iw_event->remote_addr = cm_id_priv->id.remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* The connect attempt failed */
		qp = cm_id_priv->qp;
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * The provider reported a disconnect on the connection; an ESTABLISHED
 * cm_id moves to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * The provider reports that the connection is now fully closed.  Release
 * the QP reference and, unless the cm_id is being destroyed, deliver the
 * close event to the client.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	struct ib_qp *qp;
	unsigned long flags;
	int ret = 0, notify_event = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;

	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		notify_event = 1;
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);
	if (notify_event)
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Drain the events queued on a cm_id's work_list.  Runs on the ordered
 * iwcm_wq workqueue.  If the client's event handler returns a non-zero
 * value the cm_id is destroyed.  Each processed work element drops the
 * reference taken when the event was queued.
 */
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
			ret = process_event(cm_id_priv, &levent);
			if (ret)
				destroy_cm_id(&cm_id_priv->id);
		} else
			pr_debug("dropping event %d\n", levent.event);
		if (iwcm_deref_id(cm_id_priv))
			return;
		if (empty)
			return;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * Provider event upcall.  May be called in atomic (interrupt) context, so
 * the event is copied into a pre-allocated work element and queued to the
 * iwcm_wq workqueue, where callbacks may block and downcall into the CM.
 * Events for a cm_id are queued on its work_list; the first queued event
 * also schedules the work on iwcm_wq.  Returns -ENOMEM if no free work
 * element is available, in which case the event is dropped.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);

static int __init iw_cm_init(void)
{
	int ret;

	ret = iwpm_init(RDMA_NL_IWCM);
	if (ret)
		pr_err("iw_cm: couldn't init iwpm\n");
	else
		rdma_nl_register(RDMA_NL_IWCM, iwcm_nl_cb_table);
	iwcm_wq = alloc_ordered_workqueue("iw_cm_wq", 0);
	if (!iwcm_wq)
		return -ENOMEM;

	iwcm_ctl_table_hdr = register_net_sysctl(&init_net, "net/iw_cm",
						 iwcm_ctl_table);
	if (!iwcm_ctl_table_hdr) {
		pr_err("iw_cm: couldn't register sysctl paths\n");
		destroy_workqueue(iwcm_wq);
		return -ENOMEM;
	}

	return 0;
}

static void __exit iw_cm_cleanup(void)
{
	unregister_net_sysctl_table(iwcm_ctl_table_hdr);
	destroy_workqueue(iwcm_wq);
	rdma_nl_unregister(RDMA_NL_IWCM);
	iwpm_exit(RDMA_NL_IWCM);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_IWCM, 2);

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);