This source file includes following definitions.
- ibcm_reject_msg
- cm_deref_id
- cm_alloc_msg
- cm_alloc_response_msg_no_ah
- cm_create_response_msg_ah
- cm_free_msg
- cm_alloc_response_msg
- cm_copy_private_data
- cm_set_private_data
- cm_init_av_for_lap
- cm_init_av_for_response
- add_cm_id_to_port_list
- get_cm_port_from_path
- cm_init_av_by_path
- cm_local_id
- cm_free_id
- cm_get_id
- cm_acquire_id
- be32_lt
- be32_gt
- be64_lt
- be64_gt
- cm_insert_listen
- cm_find_listen
- cm_insert_remote_id
- cm_find_remote_id
- cm_insert_remote_qpn
- cm_insert_remote_sidr
- cm_reject_sidr_req
- ib_create_cm_id
- cm_dequeue_work
- cm_free_work
- cm_convert_to_ms
- cm_ack_timeout
- cm_cleanup_timewait
- cm_create_timewait_info
- cm_enter_timewait
- cm_reset_to_idle
- cm_destroy_id
- ib_destroy_cm_id
- __ib_cm_listen
- ib_cm_listen
- ib_cm_insert_listen
- cm_form_tid
- cm_format_mad_hdr
- cm_format_req
- cm_validate_req_param
- ib_send_cm_req
- cm_issue_rej
- cm_is_active_peer
- cm_req_has_alt_path
- cm_path_set_rec_type
- cm_format_path_lid_from_req
- cm_format_paths_from_req
- cm_get_bth_pkey
- cm_opa_to_ib_sgid
- cm_format_req_event
- cm_process_work
- cm_format_mra
- cm_format_rej
- cm_dup_req_handler
- cm_match_req
- cm_process_routed_req
- cm_req_handler
- cm_format_rep
- ib_send_cm_rep
- cm_format_rtu
- ib_send_cm_rtu
- cm_format_rep_event
- cm_dup_rep_handler
- cm_rep_handler
- cm_establish_handler
- cm_rtu_handler
- cm_format_dreq
- ib_send_cm_dreq
- cm_format_drep
- ib_send_cm_drep
- cm_issue_drep
- cm_dreq_handler
- cm_drep_handler
- ib_send_cm_rej
- cm_format_rej_event
- cm_acquire_rejected_id
- cm_rej_handler
- ib_send_cm_mra
- cm_acquire_mraed_id
- cm_mra_handler
- cm_format_lap
- ib_send_cm_lap
- cm_format_path_lid_from_lap
- cm_format_path_from_lap
- cm_lap_handler
- cm_format_apr
- ib_send_cm_apr
- cm_apr_handler
- cm_timewait_handler
- cm_format_sidr_req
- ib_send_cm_sidr_req
- cm_format_sidr_req_event
- cm_sidr_req_handler
- cm_format_sidr_rep
- ib_send_cm_sidr_rep
- cm_format_sidr_rep_event
- cm_sidr_rep_handler
- cm_process_send_error
- cm_send_handler
- cm_work_handler
- cm_establish
- cm_migrate
- ib_cm_notify
- cm_recv_handler
- cm_init_qp_init_attr
- cm_init_qp_rtr_attr
- cm_init_qp_rts_attr
- ib_cm_init_qp_attr
- cm_show_counter
- cm_devnode
- cm_create_port_fs
- cm_remove_port_fs
- cm_add_one
- cm_remove_one
- ib_cm_init
- ib_cm_cleanup
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36 #include <linux/completion.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/device.h>
39 #include <linux/module.h>
40 #include <linux/err.h>
41 #include <linux/idr.h>
42 #include <linux/interrupt.h>
43 #include <linux/random.h>
44 #include <linux/rbtree.h>
45 #include <linux/spinlock.h>
46 #include <linux/slab.h>
47 #include <linux/sysfs.h>
48 #include <linux/workqueue.h>
49 #include <linux/kdev_t.h>
50 #include <linux/etherdevice.h>
51
52 #include <rdma/ib_cache.h>
53 #include <rdma/ib_cm.h>
54 #include "cm_msgs.h"
55 #include "core_priv.h"
56
57 MODULE_AUTHOR("Sean Hefty");
58 MODULE_DESCRIPTION("InfiniBand CM");
59 MODULE_LICENSE("Dual BSD/GPL");
60
61 static const char * const ibcm_rej_reason_strs[] = {
62 [IB_CM_REJ_NO_QP] = "no QP",
63 [IB_CM_REJ_NO_EEC] = "no EEC",
64 [IB_CM_REJ_NO_RESOURCES] = "no resources",
65 [IB_CM_REJ_TIMEOUT] = "timeout",
66 [IB_CM_REJ_UNSUPPORTED] = "unsupported",
67 [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
68 [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
69 [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
70 [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
71 [IB_CM_REJ_STALE_CONN] = "stale conn",
72 [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
73 [IB_CM_REJ_INVALID_GID] = "invalid GID",
74 [IB_CM_REJ_INVALID_LID] = "invalid LID",
75 [IB_CM_REJ_INVALID_SL] = "invalid SL",
76 [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
77 [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
78 [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
79 [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
80 [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
81 [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
82 [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
83 [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
84 [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
85 [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
86 [IB_CM_REJ_PORT_REDIRECT] = "port redirect",
87 [IB_CM_REJ_INVALID_MTU] = "invalid MTU",
88 [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
89 [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
90 [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
91 [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
92 [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
93 [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
94 [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
95 };
96
97 const char *__attribute_const__ ibcm_reject_msg(int reason)
98 {
99 size_t index = reason;
100
101 if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
102 ibcm_rej_reason_strs[index])
103 return ibcm_rej_reason_strs[index];
104 else
105 return "unrecognized reason";
106 }
107 EXPORT_SYMBOL(ibcm_reject_msg);
108
109 static void cm_add_one(struct ib_device *device);
110 static void cm_remove_one(struct ib_device *device, void *client_data);
111
112 static struct ib_client cm_client = {
113 .name = "cm",
114 .add = cm_add_one,
115 .remove = cm_remove_one
116 };
117
118 static struct ib_cm {
119 spinlock_t lock;
120 struct list_head device_list;
121 rwlock_t device_lock;
122 struct rb_root listen_service_table;
123 u64 listen_service_id;
124
125 struct rb_root remote_qp_table;
126 struct rb_root remote_id_table;
127 struct rb_root remote_sidr_table;
128 struct xarray local_id_table;
129 u32 local_id_next;
130 __be32 random_id_operand;
131 struct list_head timewait_list;
132 struct workqueue_struct *wq;
133
134 spinlock_t state_lock;
135 } cm;
136
137
138 enum {
139 CM_REQ_COUNTER,
140 CM_MRA_COUNTER,
141 CM_REJ_COUNTER,
142 CM_REP_COUNTER,
143 CM_RTU_COUNTER,
144 CM_DREQ_COUNTER,
145 CM_DREP_COUNTER,
146 CM_SIDR_REQ_COUNTER,
147 CM_SIDR_REP_COUNTER,
148 CM_LAP_COUNTER,
149 CM_APR_COUNTER,
150 CM_ATTR_COUNT,
151 CM_ATTR_ID_OFFSET = 0x0010,
152 };
153
154 enum {
155 CM_XMIT,
156 CM_XMIT_RETRIES,
157 CM_RECV,
158 CM_RECV_DUPLICATES,
159 CM_COUNTER_GROUPS
160 };
161
162 static char const counter_group_names[CM_COUNTER_GROUPS]
163 [sizeof("cm_rx_duplicates")] = {
164 "cm_tx_msgs", "cm_tx_retries",
165 "cm_rx_msgs", "cm_rx_duplicates"
166 };
167
168 struct cm_counter_group {
169 struct kobject obj;
170 atomic_long_t counter[CM_ATTR_COUNT];
171 };
172
173 struct cm_counter_attribute {
174 struct attribute attr;
175 int index;
176 };
177
178 #define CM_COUNTER_ATTR(_name, _index) \
179 struct cm_counter_attribute cm_##_name##_counter_attr = { \
180 .attr = { .name = __stringify(_name), .mode = 0444 }, \
181 .index = _index \
182 }
183
184 static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
185 static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
186 static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
187 static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
188 static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
189 static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
190 static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
191 static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
192 static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
193 static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
194 static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
195
196 static struct attribute *cm_counter_default_attrs[] = {
197 &cm_req_counter_attr.attr,
198 &cm_mra_counter_attr.attr,
199 &cm_rej_counter_attr.attr,
200 &cm_rep_counter_attr.attr,
201 &cm_rtu_counter_attr.attr,
202 &cm_dreq_counter_attr.attr,
203 &cm_drep_counter_attr.attr,
204 &cm_sidr_req_counter_attr.attr,
205 &cm_sidr_rep_counter_attr.attr,
206 &cm_lap_counter_attr.attr,
207 &cm_apr_counter_attr.attr,
208 NULL
209 };
210
211 struct cm_port {
212 struct cm_device *cm_dev;
213 struct ib_mad_agent *mad_agent;
214 struct kobject port_obj;
215 u8 port_num;
216 struct list_head cm_priv_prim_list;
217 struct list_head cm_priv_altr_list;
218 struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
219 };
220
221 struct cm_device {
222 struct list_head list;
223 struct ib_device *ib_device;
224 u8 ack_delay;
225 int going_down;
226 struct cm_port *port[0];
227 };
228
229 struct cm_av {
230 struct cm_port *port;
231 union ib_gid dgid;
232 struct rdma_ah_attr ah_attr;
233 u16 pkey_index;
234 u8 timeout;
235 };
236
237 struct cm_work {
238 struct delayed_work work;
239 struct list_head list;
240 struct cm_port *port;
241 struct ib_mad_recv_wc *mad_recv_wc;
242 __be32 local_id;
243 __be32 remote_id;
244 struct ib_cm_event cm_event;
245 struct sa_path_rec path[0];
246 };
247
248 struct cm_timewait_info {
249 struct cm_work work;
250 struct list_head list;
251 struct rb_node remote_qp_node;
252 struct rb_node remote_id_node;
253 __be64 remote_ca_guid;
254 __be32 remote_qpn;
255 u8 inserted_remote_qp;
256 u8 inserted_remote_id;
257 };
258
259 struct cm_id_private {
260 struct ib_cm_id id;
261
262 struct rb_node service_node;
263 struct rb_node sidr_id_node;
264 spinlock_t lock;
265 struct completion comp;
266 atomic_t refcount;
267
268
269 int listen_sharecount;
270
271 struct ib_mad_send_buf *msg;
272 struct cm_timewait_info *timewait_info;
273
274 struct cm_av av;
275 struct cm_av alt_av;
276
277 void *private_data;
278 __be64 tid;
279 __be32 local_qpn;
280 __be32 remote_qpn;
281 enum ib_qp_type qp_type;
282 __be32 sq_psn;
283 __be32 rq_psn;
284 int timeout_ms;
285 enum ib_mtu path_mtu;
286 __be16 pkey;
287 u8 private_data_len;
288 u8 max_cm_retries;
289 u8 peer_to_peer;
290 u8 responder_resources;
291 u8 initiator_depth;
292 u8 retry_count;
293 u8 rnr_retry_count;
294 u8 service_timeout;
295 u8 target_ack_delay;
296
297 struct list_head prim_list;
298 struct list_head altr_list;
299
300 int prim_send_port_not_ready;
301 int altr_send_port_not_ready;
302
303 struct list_head work_list;
304 atomic_t work_count;
305 };
306
307 static void cm_work_handler(struct work_struct *work);
308
309 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
310 {
311 if (atomic_dec_and_test(&cm_id_priv->refcount))
312 complete(&cm_id_priv->comp);
313 }
314
315 static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
316 struct ib_mad_send_buf **msg)
317 {
318 struct ib_mad_agent *mad_agent;
319 struct ib_mad_send_buf *m;
320 struct ib_ah *ah;
321 struct cm_av *av;
322 unsigned long flags, flags2;
323 int ret = 0;
324
325
326 spin_lock_irqsave(&cm.state_lock, flags2);
327 spin_lock_irqsave(&cm.lock, flags);
328 if (!cm_id_priv->prim_send_port_not_ready)
329 av = &cm_id_priv->av;
330 else if (!cm_id_priv->altr_send_port_not_ready &&
331 (cm_id_priv->alt_av.port))
332 av = &cm_id_priv->alt_av;
333 else {
334 pr_info("%s: not valid CM id\n", __func__);
335 ret = -ENODEV;
336 spin_unlock_irqrestore(&cm.lock, flags);
337 goto out;
338 }
339 spin_unlock_irqrestore(&cm.lock, flags);
340
341 mad_agent = cm_id_priv->av.port->mad_agent;
342 if (!mad_agent) {
343 pr_info("%s: not a valid MAD agent\n", __func__);
344 ret = -ENODEV;
345 goto out;
346 }
347 ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
348 if (IS_ERR(ah)) {
349 ret = PTR_ERR(ah);
350 goto out;
351 }
352
353 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
354 av->pkey_index,
355 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
356 GFP_ATOMIC,
357 IB_MGMT_BASE_VERSION);
358 if (IS_ERR(m)) {
359 rdma_destroy_ah(ah, 0);
360 ret = PTR_ERR(m);
361 goto out;
362 }
363
364
365 m->ah = ah;
366 m->retries = cm_id_priv->max_cm_retries;
367
368 atomic_inc(&cm_id_priv->refcount);
369 m->context[0] = cm_id_priv;
370 *msg = m;
371
372 out:
373 spin_unlock_irqrestore(&cm.state_lock, flags2);
374 return ret;
375 }
376
377 static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
378 struct ib_mad_recv_wc *mad_recv_wc)
379 {
380 return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
381 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
382 GFP_ATOMIC,
383 IB_MGMT_BASE_VERSION);
384 }
385
386 static int cm_create_response_msg_ah(struct cm_port *port,
387 struct ib_mad_recv_wc *mad_recv_wc,
388 struct ib_mad_send_buf *msg)
389 {
390 struct ib_ah *ah;
391
392 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
393 mad_recv_wc->recv_buf.grh, port->port_num);
394 if (IS_ERR(ah))
395 return PTR_ERR(ah);
396
397 msg->ah = ah;
398 return 0;
399 }
400
401 static void cm_free_msg(struct ib_mad_send_buf *msg)
402 {
403 if (msg->ah)
404 rdma_destroy_ah(msg->ah, 0);
405 if (msg->context[0])
406 cm_deref_id(msg->context[0]);
407 ib_free_send_mad(msg);
408 }
409
410 static int cm_alloc_response_msg(struct cm_port *port,
411 struct ib_mad_recv_wc *mad_recv_wc,
412 struct ib_mad_send_buf **msg)
413 {
414 struct ib_mad_send_buf *m;
415 int ret;
416
417 m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
418 if (IS_ERR(m))
419 return PTR_ERR(m);
420
421 ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
422 if (ret) {
423 cm_free_msg(m);
424 return ret;
425 }
426
427 *msg = m;
428 return 0;
429 }
430
431 static void * cm_copy_private_data(const void *private_data,
432 u8 private_data_len)
433 {
434 void *data;
435
436 if (!private_data || !private_data_len)
437 return NULL;
438
439 data = kmemdup(private_data, private_data_len, GFP_KERNEL);
440 if (!data)
441 return ERR_PTR(-ENOMEM);
442
443 return data;
444 }
445
446 static void cm_set_private_data(struct cm_id_private *cm_id_priv,
447 void *private_data, u8 private_data_len)
448 {
449 if (cm_id_priv->private_data && cm_id_priv->private_data_len)
450 kfree(cm_id_priv->private_data);
451
452 cm_id_priv->private_data = private_data;
453 cm_id_priv->private_data_len = private_data_len;
454 }
455
456 static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
457 struct ib_grh *grh, struct cm_av *av)
458 {
459 struct rdma_ah_attr new_ah_attr;
460 int ret;
461
462 av->port = port;
463 av->pkey_index = wc->pkey_index;
464
465
466
467
468
469
470
471
472 ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
473 port->port_num, wc,
474 grh, &new_ah_attr);
475 if (ret)
476 return ret;
477
478 rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
479 return 0;
480 }
481
482 static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
483 struct ib_grh *grh, struct cm_av *av)
484 {
485 av->port = port;
486 av->pkey_index = wc->pkey_index;
487 return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
488 port->port_num, wc,
489 grh, &av->ah_attr);
490 }
491
492 static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
493 struct cm_av *av,
494 struct cm_port *port)
495 {
496 unsigned long flags;
497 int ret = 0;
498
499 spin_lock_irqsave(&cm.lock, flags);
500
501 if (&cm_id_priv->av == av)
502 list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
503 else if (&cm_id_priv->alt_av == av)
504 list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
505 else
506 ret = -EINVAL;
507
508 spin_unlock_irqrestore(&cm.lock, flags);
509 return ret;
510 }
511
512 static struct cm_port *
513 get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
514 {
515 struct cm_device *cm_dev;
516 struct cm_port *port = NULL;
517 unsigned long flags;
518
519 if (attr) {
520 read_lock_irqsave(&cm.device_lock, flags);
521 list_for_each_entry(cm_dev, &cm.device_list, list) {
522 if (cm_dev->ib_device == attr->device) {
523 port = cm_dev->port[attr->port_num - 1];
524 break;
525 }
526 }
527 read_unlock_irqrestore(&cm.device_lock, flags);
528 } else {
529
530
531
532
533
534
535 read_lock_irqsave(&cm.device_lock, flags);
536 list_for_each_entry(cm_dev, &cm.device_list, list) {
537 attr = rdma_find_gid(cm_dev->ib_device,
538 &path->sgid,
539 sa_conv_pathrec_to_gid_type(path),
540 NULL);
541 if (!IS_ERR(attr)) {
542 port = cm_dev->port[attr->port_num - 1];
543 break;
544 }
545 }
546 read_unlock_irqrestore(&cm.device_lock, flags);
547 if (port)
548 rdma_put_gid_attr(attr);
549 }
550 return port;
551 }
552
553 static int cm_init_av_by_path(struct sa_path_rec *path,
554 const struct ib_gid_attr *sgid_attr,
555 struct cm_av *av,
556 struct cm_id_private *cm_id_priv)
557 {
558 struct rdma_ah_attr new_ah_attr;
559 struct cm_device *cm_dev;
560 struct cm_port *port;
561 int ret;
562
563 port = get_cm_port_from_path(path, sgid_attr);
564 if (!port)
565 return -EINVAL;
566 cm_dev = port->cm_dev;
567
568 ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
569 be16_to_cpu(path->pkey), &av->pkey_index);
570 if (ret)
571 return ret;
572
573 av->port = port;
574
575
576
577
578
579
580
581
582
583
584 ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
585 &new_ah_attr, sgid_attr);
586 if (ret)
587 return ret;
588
589 av->timeout = path->packet_life_time + 1;
590
591 ret = add_cm_id_to_port_list(cm_id_priv, av, port);
592 if (ret) {
593 rdma_destroy_ah_attr(&new_ah_attr);
594 return ret;
595 }
596 rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
597 return 0;
598 }
599
600 static u32 cm_local_id(__be32 local_id)
601 {
602 return (__force u32) (local_id ^ cm.random_id_operand);
603 }
604
605 static void cm_free_id(__be32 local_id)
606 {
607 xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
608 }
609
610 static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
611 {
612 struct cm_id_private *cm_id_priv;
613
614 cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
615 if (cm_id_priv) {
616 if (cm_id_priv->id.remote_id == remote_id)
617 atomic_inc(&cm_id_priv->refcount);
618 else
619 cm_id_priv = NULL;
620 }
621
622 return cm_id_priv;
623 }
624
625 static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
626 {
627 struct cm_id_private *cm_id_priv;
628
629 spin_lock_irq(&cm.lock);
630 cm_id_priv = cm_get_id(local_id, remote_id);
631 spin_unlock_irq(&cm.lock);
632
633 return cm_id_priv;
634 }
635
636
637
638
639
640
641 static int be32_lt(__be32 a, __be32 b)
642 {
643 return (__force u32) a < (__force u32) b;
644 }
645
646 static int be32_gt(__be32 a, __be32 b)
647 {
648 return (__force u32) a > (__force u32) b;
649 }
650
651 static int be64_lt(__be64 a, __be64 b)
652 {
653 return (__force u64) a < (__force u64) b;
654 }
655
656 static int be64_gt(__be64 a, __be64 b)
657 {
658 return (__force u64) a > (__force u64) b;
659 }
660
661 static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
662 {
663 struct rb_node **link = &cm.listen_service_table.rb_node;
664 struct rb_node *parent = NULL;
665 struct cm_id_private *cur_cm_id_priv;
666 __be64 service_id = cm_id_priv->id.service_id;
667 __be64 service_mask = cm_id_priv->id.service_mask;
668
669 while (*link) {
670 parent = *link;
671 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
672 service_node);
673 if ((cur_cm_id_priv->id.service_mask & service_id) ==
674 (service_mask & cur_cm_id_priv->id.service_id) &&
675 (cm_id_priv->id.device == cur_cm_id_priv->id.device))
676 return cur_cm_id_priv;
677
678 if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
679 link = &(*link)->rb_left;
680 else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
681 link = &(*link)->rb_right;
682 else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
683 link = &(*link)->rb_left;
684 else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
685 link = &(*link)->rb_right;
686 else
687 link = &(*link)->rb_right;
688 }
689 rb_link_node(&cm_id_priv->service_node, parent, link);
690 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
691 return NULL;
692 }
693
694 static struct cm_id_private * cm_find_listen(struct ib_device *device,
695 __be64 service_id)
696 {
697 struct rb_node *node = cm.listen_service_table.rb_node;
698 struct cm_id_private *cm_id_priv;
699
700 while (node) {
701 cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
702 if ((cm_id_priv->id.service_mask & service_id) ==
703 cm_id_priv->id.service_id &&
704 (cm_id_priv->id.device == device))
705 return cm_id_priv;
706
707 if (device < cm_id_priv->id.device)
708 node = node->rb_left;
709 else if (device > cm_id_priv->id.device)
710 node = node->rb_right;
711 else if (be64_lt(service_id, cm_id_priv->id.service_id))
712 node = node->rb_left;
713 else if (be64_gt(service_id, cm_id_priv->id.service_id))
714 node = node->rb_right;
715 else
716 node = node->rb_right;
717 }
718 return NULL;
719 }
720
721 static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
722 *timewait_info)
723 {
724 struct rb_node **link = &cm.remote_id_table.rb_node;
725 struct rb_node *parent = NULL;
726 struct cm_timewait_info *cur_timewait_info;
727 __be64 remote_ca_guid = timewait_info->remote_ca_guid;
728 __be32 remote_id = timewait_info->work.remote_id;
729
730 while (*link) {
731 parent = *link;
732 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
733 remote_id_node);
734 if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
735 link = &(*link)->rb_left;
736 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
737 link = &(*link)->rb_right;
738 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
739 link = &(*link)->rb_left;
740 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
741 link = &(*link)->rb_right;
742 else
743 return cur_timewait_info;
744 }
745 timewait_info->inserted_remote_id = 1;
746 rb_link_node(&timewait_info->remote_id_node, parent, link);
747 rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
748 return NULL;
749 }
750
751 static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
752 __be32 remote_id)
753 {
754 struct rb_node *node = cm.remote_id_table.rb_node;
755 struct cm_timewait_info *timewait_info;
756
757 while (node) {
758 timewait_info = rb_entry(node, struct cm_timewait_info,
759 remote_id_node);
760 if (be32_lt(remote_id, timewait_info->work.remote_id))
761 node = node->rb_left;
762 else if (be32_gt(remote_id, timewait_info->work.remote_id))
763 node = node->rb_right;
764 else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
765 node = node->rb_left;
766 else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
767 node = node->rb_right;
768 else
769 return timewait_info;
770 }
771 return NULL;
772 }
773
774 static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
775 *timewait_info)
776 {
777 struct rb_node **link = &cm.remote_qp_table.rb_node;
778 struct rb_node *parent = NULL;
779 struct cm_timewait_info *cur_timewait_info;
780 __be64 remote_ca_guid = timewait_info->remote_ca_guid;
781 __be32 remote_qpn = timewait_info->remote_qpn;
782
783 while (*link) {
784 parent = *link;
785 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
786 remote_qp_node);
787 if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
788 link = &(*link)->rb_left;
789 else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
790 link = &(*link)->rb_right;
791 else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
792 link = &(*link)->rb_left;
793 else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
794 link = &(*link)->rb_right;
795 else
796 return cur_timewait_info;
797 }
798 timewait_info->inserted_remote_qp = 1;
799 rb_link_node(&timewait_info->remote_qp_node, parent, link);
800 rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
801 return NULL;
802 }
803
804 static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
805 *cm_id_priv)
806 {
807 struct rb_node **link = &cm.remote_sidr_table.rb_node;
808 struct rb_node *parent = NULL;
809 struct cm_id_private *cur_cm_id_priv;
810 union ib_gid *port_gid = &cm_id_priv->av.dgid;
811 __be32 remote_id = cm_id_priv->id.remote_id;
812
813 while (*link) {
814 parent = *link;
815 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
816 sidr_id_node);
817 if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
818 link = &(*link)->rb_left;
819 else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
820 link = &(*link)->rb_right;
821 else {
822 int cmp;
823 cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
824 sizeof *port_gid);
825 if (cmp < 0)
826 link = &(*link)->rb_left;
827 else if (cmp > 0)
828 link = &(*link)->rb_right;
829 else
830 return cur_cm_id_priv;
831 }
832 }
833 rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
834 rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
835 return NULL;
836 }
837
838 static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
839 enum ib_cm_sidr_status status)
840 {
841 struct ib_cm_sidr_rep_param param;
842
843 memset(¶m, 0, sizeof param);
844 param.status = status;
845 ib_send_cm_sidr_rep(&cm_id_priv->id, ¶m);
846 }
847
848 struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
849 ib_cm_handler cm_handler,
850 void *context)
851 {
852 struct cm_id_private *cm_id_priv;
853 u32 id;
854 int ret;
855
856 cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
857 if (!cm_id_priv)
858 return ERR_PTR(-ENOMEM);
859
860 cm_id_priv->id.state = IB_CM_IDLE;
861 cm_id_priv->id.device = device;
862 cm_id_priv->id.cm_handler = cm_handler;
863 cm_id_priv->id.context = context;
864 cm_id_priv->id.remote_cm_qpn = 1;
865
866 spin_lock_init(&cm_id_priv->lock);
867 init_completion(&cm_id_priv->comp);
868 INIT_LIST_HEAD(&cm_id_priv->work_list);
869 INIT_LIST_HEAD(&cm_id_priv->prim_list);
870 INIT_LIST_HEAD(&cm_id_priv->altr_list);
871 atomic_set(&cm_id_priv->work_count, -1);
872 atomic_set(&cm_id_priv->refcount, 1);
873
874 ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
875 &cm.local_id_next, GFP_KERNEL);
876 if (ret < 0)
877 goto error;
878 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
879 xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
880 cm_id_priv, GFP_KERNEL);
881
882 return &cm_id_priv->id;
883
884 error:
885 kfree(cm_id_priv);
886 return ERR_PTR(ret);
887 }
888 EXPORT_SYMBOL(ib_create_cm_id);
889
890 static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
891 {
892 struct cm_work *work;
893
894 if (list_empty(&cm_id_priv->work_list))
895 return NULL;
896
897 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
898 list_del(&work->list);
899 return work;
900 }
901
902 static void cm_free_work(struct cm_work *work)
903 {
904 if (work->mad_recv_wc)
905 ib_free_recv_mad(work->mad_recv_wc);
906 kfree(work);
907 }
908
909 static inline int cm_convert_to_ms(int iba_time)
910 {
911
912 return 1 << max(iba_time - 8, 0);
913 }
914
915
916
917
918
919
920
921 static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
922 {
923 int ack_timeout = packet_life_time + 1;
924
925 if (ack_timeout >= ca_ack_delay)
926 ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
927 else
928 ack_timeout = ca_ack_delay +
929 (ack_timeout >= (ca_ack_delay - 1));
930
931 return min(31, ack_timeout);
932 }
933
934 static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
935 {
936 if (timewait_info->inserted_remote_id) {
937 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
938 timewait_info->inserted_remote_id = 0;
939 }
940
941 if (timewait_info->inserted_remote_qp) {
942 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
943 timewait_info->inserted_remote_qp = 0;
944 }
945 }
946
947 static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
948 {
949 struct cm_timewait_info *timewait_info;
950
951 timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
952 if (!timewait_info)
953 return ERR_PTR(-ENOMEM);
954
955 timewait_info->work.local_id = local_id;
956 INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
957 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
958 return timewait_info;
959 }
960
961 static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
962 {
963 int wait_time;
964 unsigned long flags;
965 struct cm_device *cm_dev;
966
967 cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
968 if (!cm_dev)
969 return;
970
971 spin_lock_irqsave(&cm.lock, flags);
972 cm_cleanup_timewait(cm_id_priv->timewait_info);
973 list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
974 spin_unlock_irqrestore(&cm.lock, flags);
975
976
977
978
979
980
981 cm_id_priv->id.state = IB_CM_TIMEWAIT;
982 wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
983
984
985 spin_lock_irqsave(&cm.lock, flags);
986 if (!cm_dev->going_down)
987 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
988 msecs_to_jiffies(wait_time));
989 spin_unlock_irqrestore(&cm.lock, flags);
990
991 cm_id_priv->timewait_info = NULL;
992 }
993
994 static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
995 {
996 unsigned long flags;
997
998 cm_id_priv->id.state = IB_CM_IDLE;
999 if (cm_id_priv->timewait_info) {
1000 spin_lock_irqsave(&cm.lock, flags);
1001 cm_cleanup_timewait(cm_id_priv->timewait_info);
1002 spin_unlock_irqrestore(&cm.lock, flags);
1003 kfree(cm_id_priv->timewait_info);
1004 cm_id_priv->timewait_info = NULL;
1005 }
1006 }
1007
1008 static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
1009 {
1010 struct cm_id_private *cm_id_priv;
1011 struct cm_work *work;
1012
1013 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1014 retest:
1015 spin_lock_irq(&cm_id_priv->lock);
1016 switch (cm_id->state) {
1017 case IB_CM_LISTEN:
1018 spin_unlock_irq(&cm_id_priv->lock);
1019
1020 spin_lock_irq(&cm.lock);
1021 if (--cm_id_priv->listen_sharecount > 0) {
1022
1023 cm_deref_id(cm_id_priv);
1024 spin_unlock_irq(&cm.lock);
1025 return;
1026 }
1027 rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
1028 spin_unlock_irq(&cm.lock);
1029 break;
1030 case IB_CM_SIDR_REQ_SENT:
1031 cm_id->state = IB_CM_IDLE;
1032 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1033 spin_unlock_irq(&cm_id_priv->lock);
1034 break;
1035 case IB_CM_SIDR_REQ_RCVD:
1036 spin_unlock_irq(&cm_id_priv->lock);
1037 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
1038 spin_lock_irq(&cm.lock);
1039 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
1040 rb_erase(&cm_id_priv->sidr_id_node,
1041 &cm.remote_sidr_table);
1042 spin_unlock_irq(&cm.lock);
1043 break;
1044 case IB_CM_REQ_SENT:
1045 case IB_CM_MRA_REQ_RCVD:
1046 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1047 spin_unlock_irq(&cm_id_priv->lock);
1048 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
1049 &cm_id_priv->id.device->node_guid,
1050 sizeof cm_id_priv->id.device->node_guid,
1051 NULL, 0);
1052 break;
1053 case IB_CM_REQ_RCVD:
1054 if (err == -ENOMEM) {
1055
1056 cm_reset_to_idle(cm_id_priv);
1057 spin_unlock_irq(&cm_id_priv->lock);
1058 } else {
1059 spin_unlock_irq(&cm_id_priv->lock);
1060 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
1061 NULL, 0, NULL, 0);
1062 }
1063 break;
1064 case IB_CM_REP_SENT:
1065 case IB_CM_MRA_REP_RCVD:
1066 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1067
1068 case IB_CM_MRA_REQ_SENT:
1069 case IB_CM_REP_RCVD:
1070 case IB_CM_MRA_REP_SENT:
1071 spin_unlock_irq(&cm_id_priv->lock);
1072 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
1073 NULL, 0, NULL, 0);
1074 break;
1075 case IB_CM_ESTABLISHED:
1076 spin_unlock_irq(&cm_id_priv->lock);
1077 if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
1078 break;
1079 ib_send_cm_dreq(cm_id, NULL, 0);
1080 goto retest;
1081 case IB_CM_DREQ_SENT:
1082 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1083 cm_enter_timewait(cm_id_priv);
1084 spin_unlock_irq(&cm_id_priv->lock);
1085 break;
1086 case IB_CM_DREQ_RCVD:
1087 spin_unlock_irq(&cm_id_priv->lock);
1088 ib_send_cm_drep(cm_id, NULL, 0);
1089 break;
1090 default:
1091 spin_unlock_irq(&cm_id_priv->lock);
1092 break;
1093 }
1094
1095 spin_lock_irq(&cm.lock);
1096 if (!list_empty(&cm_id_priv->altr_list) &&
1097 (!cm_id_priv->altr_send_port_not_ready))
1098 list_del(&cm_id_priv->altr_list);
1099 if (!list_empty(&cm_id_priv->prim_list) &&
1100 (!cm_id_priv->prim_send_port_not_ready))
1101 list_del(&cm_id_priv->prim_list);
1102 spin_unlock_irq(&cm.lock);
1103
1104 cm_free_id(cm_id->local_id);
1105 cm_deref_id(cm_id_priv);
1106 wait_for_completion(&cm_id_priv->comp);
1107 while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
1108 cm_free_work(work);
1109
1110 rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
1111 rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
1112 kfree(cm_id_priv->private_data);
1113 kfree(cm_id_priv);
1114 }
1115
1116 void ib_destroy_cm_id(struct ib_cm_id *cm_id)
1117 {
1118 cm_destroy_id(cm_id, 0);
1119 }
1120 EXPORT_SYMBOL(ib_destroy_cm_id);
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135 static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
1136 __be64 service_mask)
1137 {
1138 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
1139 int ret = 0;
1140
1141 service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
1142 service_id &= service_mask;
1143 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
1144 (service_id != IB_CM_ASSIGN_SERVICE_ID))
1145 return -EINVAL;
1146
1147 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1148 if (cm_id->state != IB_CM_IDLE)
1149 return -EINVAL;
1150
1151 cm_id->state = IB_CM_LISTEN;
1152 ++cm_id_priv->listen_sharecount;
1153
1154 if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
1155 cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
1156 cm_id->service_mask = ~cpu_to_be64(0);
1157 } else {
1158 cm_id->service_id = service_id;
1159 cm_id->service_mask = service_mask;
1160 }
1161 cur_cm_id_priv = cm_insert_listen(cm_id_priv);
1162
1163 if (cur_cm_id_priv) {
1164 cm_id->state = IB_CM_IDLE;
1165 --cm_id_priv->listen_sharecount;
1166 ret = -EBUSY;
1167 }
1168 return ret;
1169 }
1170
1171 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
1172 {
1173 unsigned long flags;
1174 int ret;
1175
1176 spin_lock_irqsave(&cm.lock, flags);
1177 ret = __ib_cm_listen(cm_id, service_id, service_mask);
1178 spin_unlock_irqrestore(&cm.lock, flags);
1179
1180 return ret;
1181 }
1182 EXPORT_SYMBOL(ib_cm_listen);
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200 struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
1201 ib_cm_handler cm_handler,
1202 __be64 service_id)
1203 {
1204 struct cm_id_private *cm_id_priv;
1205 struct ib_cm_id *cm_id;
1206 unsigned long flags;
1207 int err = 0;
1208
1209
1210 cm_id = ib_create_cm_id(device, cm_handler, NULL);
1211 if (IS_ERR(cm_id))
1212 return cm_id;
1213
1214 spin_lock_irqsave(&cm.lock, flags);
1215
1216 if (service_id == IB_CM_ASSIGN_SERVICE_ID)
1217 goto new_id;
1218
1219
1220 cm_id_priv = cm_find_listen(device, service_id);
1221 if (cm_id_priv) {
1222 if (cm_id->cm_handler != cm_handler || cm_id->context) {
1223
1224
1225 spin_unlock_irqrestore(&cm.lock, flags);
1226 ib_destroy_cm_id(cm_id);
1227 return ERR_PTR(-EINVAL);
1228 }
1229 atomic_inc(&cm_id_priv->refcount);
1230 ++cm_id_priv->listen_sharecount;
1231 spin_unlock_irqrestore(&cm.lock, flags);
1232
1233 ib_destroy_cm_id(cm_id);
1234 cm_id = &cm_id_priv->id;
1235 return cm_id;
1236 }
1237
1238 new_id:
1239
1240 err = __ib_cm_listen(cm_id, service_id, 0);
1241
1242 spin_unlock_irqrestore(&cm.lock, flags);
1243
1244 if (err) {
1245 ib_destroy_cm_id(cm_id);
1246 return ERR_PTR(err);
1247 }
1248 return cm_id;
1249 }
1250 EXPORT_SYMBOL(ib_cm_insert_listen);
1251
1252 static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
1253 {
1254 u64 hi_tid, low_tid;
1255
1256 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
1257 low_tid = (u64)cm_id_priv->id.local_id;
1258 return cpu_to_be64(hi_tid | low_tid);
1259 }
1260
1261 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
1262 __be16 attr_id, __be64 tid)
1263 {
1264 hdr->base_version = IB_MGMT_BASE_VERSION;
1265 hdr->mgmt_class = IB_MGMT_CLASS_CM;
1266 hdr->class_version = IB_CM_CLASS_VERSION;
1267 hdr->method = IB_MGMT_METHOD_SEND;
1268 hdr->attr_id = attr_id;
1269 hdr->tid = tid;
1270 }
1271
1272 static void cm_format_req(struct cm_req_msg *req_msg,
1273 struct cm_id_private *cm_id_priv,
1274 struct ib_cm_req_param *param)
1275 {
1276 struct sa_path_rec *pri_path = param->primary_path;
1277 struct sa_path_rec *alt_path = param->alternate_path;
1278 bool pri_ext = false;
1279
1280 if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
1281 pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
1282 pri_path->opa.slid);
1283
1284 cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
1285 cm_form_tid(cm_id_priv));
1286
1287 req_msg->local_comm_id = cm_id_priv->id.local_id;
1288 req_msg->service_id = param->service_id;
1289 req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
1290 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
1291 cm_req_set_init_depth(req_msg, param->initiator_depth);
1292 cm_req_set_remote_resp_timeout(req_msg,
1293 param->remote_cm_response_timeout);
1294 cm_req_set_qp_type(req_msg, param->qp_type);
1295 cm_req_set_flow_ctrl(req_msg, param->flow_control);
1296 cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
1297 cm_req_set_local_resp_timeout(req_msg,
1298 param->local_cm_response_timeout);
1299 req_msg->pkey = param->primary_path->pkey;
1300 cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
1301 cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
1302
1303 if (param->qp_type != IB_QPT_XRC_INI) {
1304 cm_req_set_resp_res(req_msg, param->responder_resources);
1305 cm_req_set_retry_count(req_msg, param->retry_count);
1306 cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
1307 cm_req_set_srq(req_msg, param->srq);
1308 }
1309
1310 req_msg->primary_local_gid = pri_path->sgid;
1311 req_msg->primary_remote_gid = pri_path->dgid;
1312 if (pri_ext) {
1313 req_msg->primary_local_gid.global.interface_id
1314 = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
1315 req_msg->primary_remote_gid.global.interface_id
1316 = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
1317 }
1318 if (pri_path->hop_limit <= 1) {
1319 req_msg->primary_local_lid = pri_ext ? 0 :
1320 htons(ntohl(sa_path_get_slid(pri_path)));
1321 req_msg->primary_remote_lid = pri_ext ? 0 :
1322 htons(ntohl(sa_path_get_dlid(pri_path)));
1323 } else {
1324
1325 req_msg->primary_local_lid = IB_LID_PERMISSIVE;
1326 req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
1327 }
1328 cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
1329 cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
1330 req_msg->primary_traffic_class = pri_path->traffic_class;
1331 req_msg->primary_hop_limit = pri_path->hop_limit;
1332 cm_req_set_primary_sl(req_msg, pri_path->sl);
1333 cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
1334 cm_req_set_primary_local_ack_timeout(req_msg,
1335 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1336 pri_path->packet_life_time));
1337
1338 if (alt_path) {
1339 bool alt_ext = false;
1340
1341 if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
1342 alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
1343 alt_path->opa.slid);
1344
1345 req_msg->alt_local_gid = alt_path->sgid;
1346 req_msg->alt_remote_gid = alt_path->dgid;
1347 if (alt_ext) {
1348 req_msg->alt_local_gid.global.interface_id
1349 = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
1350 req_msg->alt_remote_gid.global.interface_id
1351 = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
1352 }
1353 if (alt_path->hop_limit <= 1) {
1354 req_msg->alt_local_lid = alt_ext ? 0 :
1355 htons(ntohl(sa_path_get_slid(alt_path)));
1356 req_msg->alt_remote_lid = alt_ext ? 0 :
1357 htons(ntohl(sa_path_get_dlid(alt_path)));
1358 } else {
1359 req_msg->alt_local_lid = IB_LID_PERMISSIVE;
1360 req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
1361 }
1362 cm_req_set_alt_flow_label(req_msg,
1363 alt_path->flow_label);
1364 cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
1365 req_msg->alt_traffic_class = alt_path->traffic_class;
1366 req_msg->alt_hop_limit = alt_path->hop_limit;
1367 cm_req_set_alt_sl(req_msg, alt_path->sl);
1368 cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
1369 cm_req_set_alt_local_ack_timeout(req_msg,
1370 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1371 alt_path->packet_life_time));
1372 }
1373
1374 if (param->private_data && param->private_data_len)
1375 memcpy(req_msg->private_data, param->private_data,
1376 param->private_data_len);
1377 }
1378
1379 static int cm_validate_req_param(struct ib_cm_req_param *param)
1380 {
1381
1382 if (param->peer_to_peer)
1383 return -EINVAL;
1384
1385 if (!param->primary_path)
1386 return -EINVAL;
1387
1388 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
1389 param->qp_type != IB_QPT_XRC_INI)
1390 return -EINVAL;
1391
1392 if (param->private_data &&
1393 param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
1394 return -EINVAL;
1395
1396 if (param->alternate_path &&
1397 (param->alternate_path->pkey != param->primary_path->pkey ||
1398 param->alternate_path->mtu != param->primary_path->mtu))
1399 return -EINVAL;
1400
1401 return 0;
1402 }
1403
1404 int ib_send_cm_req(struct ib_cm_id *cm_id,
1405 struct ib_cm_req_param *param)
1406 {
1407 struct cm_id_private *cm_id_priv;
1408 struct cm_req_msg *req_msg;
1409 unsigned long flags;
1410 int ret;
1411
1412 ret = cm_validate_req_param(param);
1413 if (ret)
1414 return ret;
1415
1416
1417 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1418 spin_lock_irqsave(&cm_id_priv->lock, flags);
1419 if (cm_id->state != IB_CM_IDLE) {
1420 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1421 ret = -EINVAL;
1422 goto out;
1423 }
1424 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1425
1426 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1427 id.local_id);
1428 if (IS_ERR(cm_id_priv->timewait_info)) {
1429 ret = PTR_ERR(cm_id_priv->timewait_info);
1430 goto out;
1431 }
1432
1433 ret = cm_init_av_by_path(param->primary_path,
1434 param->ppath_sgid_attr, &cm_id_priv->av,
1435 cm_id_priv);
1436 if (ret)
1437 goto error1;
1438 if (param->alternate_path) {
1439 ret = cm_init_av_by_path(param->alternate_path, NULL,
1440 &cm_id_priv->alt_av, cm_id_priv);
1441 if (ret)
1442 goto error1;
1443 }
1444 cm_id->service_id = param->service_id;
1445 cm_id->service_mask = ~cpu_to_be64(0);
1446 cm_id_priv->timeout_ms = cm_convert_to_ms(
1447 param->primary_path->packet_life_time) * 2 +
1448 cm_convert_to_ms(
1449 param->remote_cm_response_timeout);
1450 cm_id_priv->max_cm_retries = param->max_cm_retries;
1451 cm_id_priv->initiator_depth = param->initiator_depth;
1452 cm_id_priv->responder_resources = param->responder_resources;
1453 cm_id_priv->retry_count = param->retry_count;
1454 cm_id_priv->path_mtu = param->primary_path->mtu;
1455 cm_id_priv->pkey = param->primary_path->pkey;
1456 cm_id_priv->qp_type = param->qp_type;
1457
1458 ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
1459 if (ret)
1460 goto error1;
1461
1462 req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
1463 cm_format_req(req_msg, cm_id_priv, param);
1464 cm_id_priv->tid = req_msg->hdr.tid;
1465 cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
1466 cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
1467
1468 cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
1469 cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
1470
1471 spin_lock_irqsave(&cm_id_priv->lock, flags);
1472 ret = ib_post_send_mad(cm_id_priv->msg, NULL);
1473 if (ret) {
1474 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1475 goto error2;
1476 }
1477 BUG_ON(cm_id->state != IB_CM_IDLE);
1478 cm_id->state = IB_CM_REQ_SENT;
1479 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1480 return 0;
1481
1482 error2: cm_free_msg(cm_id_priv->msg);
1483 error1: kfree(cm_id_priv->timewait_info);
1484 out: return ret;
1485 }
1486 EXPORT_SYMBOL(ib_send_cm_req);
1487
1488 static int cm_issue_rej(struct cm_port *port,
1489 struct ib_mad_recv_wc *mad_recv_wc,
1490 enum ib_cm_rej_reason reason,
1491 enum cm_msg_response msg_rejected,
1492 void *ari, u8 ari_length)
1493 {
1494 struct ib_mad_send_buf *msg = NULL;
1495 struct cm_rej_msg *rej_msg, *rcv_msg;
1496 int ret;
1497
1498 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1499 if (ret)
1500 return ret;
1501
1502
1503 rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
1504 rej_msg = (struct cm_rej_msg *) msg->mad;
1505
1506 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1507 rej_msg->remote_comm_id = rcv_msg->local_comm_id;
1508 rej_msg->local_comm_id = rcv_msg->remote_comm_id;
1509 cm_rej_set_msg_rejected(rej_msg, msg_rejected);
1510 rej_msg->reason = cpu_to_be16(reason);
1511
1512 if (ari && ari_length) {
1513 cm_rej_set_reject_info_len(rej_msg, ari_length);
1514 memcpy(rej_msg->ari, ari, ari_length);
1515 }
1516
1517 ret = ib_post_send_mad(msg, NULL);
1518 if (ret)
1519 cm_free_msg(msg);
1520
1521 return ret;
1522 }
1523
1524 static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
1525 __be32 local_qpn, __be32 remote_qpn)
1526 {
1527 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
1528 ((local_ca_guid == remote_ca_guid) &&
1529 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
1530 }
1531
1532 static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
1533 {
1534 return ((req_msg->alt_local_lid) ||
1535 (ib_is_opa_gid(&req_msg->alt_local_gid)));
1536 }
1537
1538 static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
1539 struct sa_path_rec *path, union ib_gid *gid)
1540 {
1541 if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
1542 path->rec_type = SA_PATH_REC_TYPE_OPA;
1543 else
1544 path->rec_type = SA_PATH_REC_TYPE_IB;
1545 }
1546
1547 static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
1548 struct sa_path_rec *primary_path,
1549 struct sa_path_rec *alt_path)
1550 {
1551 u32 lid;
1552
1553 if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1554 sa_path_set_dlid(primary_path,
1555 ntohs(req_msg->primary_local_lid));
1556 sa_path_set_slid(primary_path,
1557 ntohs(req_msg->primary_remote_lid));
1558 } else {
1559 lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
1560 sa_path_set_dlid(primary_path, lid);
1561
1562 lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
1563 sa_path_set_slid(primary_path, lid);
1564 }
1565
1566 if (!cm_req_has_alt_path(req_msg))
1567 return;
1568
1569 if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1570 sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
1571 sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
1572 } else {
1573 lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
1574 sa_path_set_dlid(alt_path, lid);
1575
1576 lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
1577 sa_path_set_slid(alt_path, lid);
1578 }
1579 }
1580
1581 static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1582 struct sa_path_rec *primary_path,
1583 struct sa_path_rec *alt_path)
1584 {
1585 primary_path->dgid = req_msg->primary_local_gid;
1586 primary_path->sgid = req_msg->primary_remote_gid;
1587 primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
1588 primary_path->hop_limit = req_msg->primary_hop_limit;
1589 primary_path->traffic_class = req_msg->primary_traffic_class;
1590 primary_path->reversible = 1;
1591 primary_path->pkey = req_msg->pkey;
1592 primary_path->sl = cm_req_get_primary_sl(req_msg);
1593 primary_path->mtu_selector = IB_SA_EQ;
1594 primary_path->mtu = cm_req_get_path_mtu(req_msg);
1595 primary_path->rate_selector = IB_SA_EQ;
1596 primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1597 primary_path->packet_life_time_selector = IB_SA_EQ;
1598 primary_path->packet_life_time =
1599 cm_req_get_primary_local_ack_timeout(req_msg);
1600 primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1601 primary_path->service_id = req_msg->service_id;
1602 if (sa_path_is_roce(primary_path))
1603 primary_path->roce.route_resolved = false;
1604
1605 if (cm_req_has_alt_path(req_msg)) {
1606 alt_path->dgid = req_msg->alt_local_gid;
1607 alt_path->sgid = req_msg->alt_remote_gid;
1608 alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1609 alt_path->hop_limit = req_msg->alt_hop_limit;
1610 alt_path->traffic_class = req_msg->alt_traffic_class;
1611 alt_path->reversible = 1;
1612 alt_path->pkey = req_msg->pkey;
1613 alt_path->sl = cm_req_get_alt_sl(req_msg);
1614 alt_path->mtu_selector = IB_SA_EQ;
1615 alt_path->mtu = cm_req_get_path_mtu(req_msg);
1616 alt_path->rate_selector = IB_SA_EQ;
1617 alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1618 alt_path->packet_life_time_selector = IB_SA_EQ;
1619 alt_path->packet_life_time =
1620 cm_req_get_alt_local_ack_timeout(req_msg);
1621 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1622 alt_path->service_id = req_msg->service_id;
1623
1624 if (sa_path_is_roce(alt_path))
1625 alt_path->roce.route_resolved = false;
1626 }
1627 cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
1628 }
1629
1630 static u16 cm_get_bth_pkey(struct cm_work *work)
1631 {
1632 struct ib_device *ib_dev = work->port->cm_dev->ib_device;
1633 u8 port_num = work->port->port_num;
1634 u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
1635 u16 pkey;
1636 int ret;
1637
1638 ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
1639 if (ret) {
1640 dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
1641 port_num, pkey_index, ret);
1642 return 0;
1643 }
1644
1645 return pkey;
1646 }
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657 static void cm_opa_to_ib_sgid(struct cm_work *work,
1658 struct sa_path_rec *path)
1659 {
1660 struct ib_device *dev = work->port->cm_dev->ib_device;
1661 u8 port_num = work->port->port_num;
1662
1663 if (rdma_cap_opa_ah(dev, port_num) &&
1664 (ib_is_opa_gid(&path->sgid))) {
1665 union ib_gid sgid;
1666
1667 if (rdma_query_gid(dev, port_num, 0, &sgid)) {
1668 dev_warn(&dev->dev,
1669 "Error updating sgid in CM request\n");
1670 return;
1671 }
1672
1673 path->sgid = sgid;
1674 }
1675 }
1676
1677 static void cm_format_req_event(struct cm_work *work,
1678 struct cm_id_private *cm_id_priv,
1679 struct ib_cm_id *listen_id)
1680 {
1681 struct cm_req_msg *req_msg;
1682 struct ib_cm_req_event_param *param;
1683
1684 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1685 param = &work->cm_event.param.req_rcvd;
1686 param->listen_id = listen_id;
1687 param->bth_pkey = cm_get_bth_pkey(work);
1688 param->port = cm_id_priv->av.port->port_num;
1689 param->primary_path = &work->path[0];
1690 cm_opa_to_ib_sgid(work, param->primary_path);
1691 if (cm_req_has_alt_path(req_msg)) {
1692 param->alternate_path = &work->path[1];
1693 cm_opa_to_ib_sgid(work, param->alternate_path);
1694 } else {
1695 param->alternate_path = NULL;
1696 }
1697 param->remote_ca_guid = req_msg->local_ca_guid;
1698 param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1699 param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1700 param->qp_type = cm_req_get_qp_type(req_msg);
1701 param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1702 param->responder_resources = cm_req_get_init_depth(req_msg);
1703 param->initiator_depth = cm_req_get_resp_res(req_msg);
1704 param->local_cm_response_timeout =
1705 cm_req_get_remote_resp_timeout(req_msg);
1706 param->flow_control = cm_req_get_flow_ctrl(req_msg);
1707 param->remote_cm_response_timeout =
1708 cm_req_get_local_resp_timeout(req_msg);
1709 param->retry_count = cm_req_get_retry_count(req_msg);
1710 param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1711 param->srq = cm_req_get_srq(req_msg);
1712 param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
1713 work->cm_event.private_data = &req_msg->private_data;
1714 }
1715
1716 static void cm_process_work(struct cm_id_private *cm_id_priv,
1717 struct cm_work *work)
1718 {
1719 int ret;
1720
1721
1722 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1723 cm_free_work(work);
1724
1725 while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1726 spin_lock_irq(&cm_id_priv->lock);
1727 work = cm_dequeue_work(cm_id_priv);
1728 spin_unlock_irq(&cm_id_priv->lock);
1729 if (!work)
1730 return;
1731
1732 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1733 &work->cm_event);
1734 cm_free_work(work);
1735 }
1736 cm_deref_id(cm_id_priv);
1737 if (ret)
1738 cm_destroy_id(&cm_id_priv->id, ret);
1739 }
1740
1741 static void cm_format_mra(struct cm_mra_msg *mra_msg,
1742 struct cm_id_private *cm_id_priv,
1743 enum cm_msg_response msg_mraed, u8 service_timeout,
1744 const void *private_data, u8 private_data_len)
1745 {
1746 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1747 cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1748 mra_msg->local_comm_id = cm_id_priv->id.local_id;
1749 mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1750 cm_mra_set_service_timeout(mra_msg, service_timeout);
1751
1752 if (private_data && private_data_len)
1753 memcpy(mra_msg->private_data, private_data, private_data_len);
1754 }
1755
1756 static void cm_format_rej(struct cm_rej_msg *rej_msg,
1757 struct cm_id_private *cm_id_priv,
1758 enum ib_cm_rej_reason reason,
1759 void *ari,
1760 u8 ari_length,
1761 const void *private_data,
1762 u8 private_data_len)
1763 {
1764 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1765 rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1766
1767 switch(cm_id_priv->id.state) {
1768 case IB_CM_REQ_RCVD:
1769 rej_msg->local_comm_id = 0;
1770 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1771 break;
1772 case IB_CM_MRA_REQ_SENT:
1773 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1774 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1775 break;
1776 case IB_CM_REP_RCVD:
1777 case IB_CM_MRA_REP_SENT:
1778 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1779 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1780 break;
1781 default:
1782 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1783 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1784 break;
1785 }
1786
1787 rej_msg->reason = cpu_to_be16(reason);
1788 if (ari && ari_length) {
1789 cm_rej_set_reject_info_len(rej_msg, ari_length);
1790 memcpy(rej_msg->ari, ari, ari_length);
1791 }
1792
1793 if (private_data && private_data_len)
1794 memcpy(rej_msg->private_data, private_data, private_data_len);
1795 }
1796
1797 static void cm_dup_req_handler(struct cm_work *work,
1798 struct cm_id_private *cm_id_priv)
1799 {
1800 struct ib_mad_send_buf *msg = NULL;
1801 int ret;
1802
1803 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1804 counter[CM_REQ_COUNTER]);
1805
1806
1807 if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1808 return;
1809
1810 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1811 if (ret)
1812 return;
1813
1814 spin_lock_irq(&cm_id_priv->lock);
1815 switch (cm_id_priv->id.state) {
1816 case IB_CM_MRA_REQ_SENT:
1817 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1818 CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1819 cm_id_priv->private_data,
1820 cm_id_priv->private_data_len);
1821 break;
1822 case IB_CM_TIMEWAIT:
1823 cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1824 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1825 break;
1826 default:
1827 goto unlock;
1828 }
1829 spin_unlock_irq(&cm_id_priv->lock);
1830
1831 ret = ib_post_send_mad(msg, NULL);
1832 if (ret)
1833 goto free;
1834 return;
1835
1836 unlock: spin_unlock_irq(&cm_id_priv->lock);
1837 free: cm_free_msg(msg);
1838 }
1839
1840 static struct cm_id_private * cm_match_req(struct cm_work *work,
1841 struct cm_id_private *cm_id_priv)
1842 {
1843 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1844 struct cm_timewait_info *timewait_info;
1845 struct cm_req_msg *req_msg;
1846 struct ib_cm_id *cm_id;
1847
1848 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1849
1850
1851 spin_lock_irq(&cm.lock);
1852 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1853 if (timewait_info) {
1854 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1855 timewait_info->work.remote_id);
1856 spin_unlock_irq(&cm.lock);
1857 if (cur_cm_id_priv) {
1858 cm_dup_req_handler(work, cur_cm_id_priv);
1859 cm_deref_id(cur_cm_id_priv);
1860 }
1861 return NULL;
1862 }
1863
1864
1865 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1866 if (timewait_info) {
1867 cm_cleanup_timewait(cm_id_priv->timewait_info);
1868 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1869 timewait_info->work.remote_id);
1870
1871 spin_unlock_irq(&cm.lock);
1872 cm_issue_rej(work->port, work->mad_recv_wc,
1873 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1874 NULL, 0);
1875 if (cur_cm_id_priv) {
1876 cm_id = &cur_cm_id_priv->id;
1877 ib_send_cm_dreq(cm_id, NULL, 0);
1878 cm_deref_id(cur_cm_id_priv);
1879 }
1880 return NULL;
1881 }
1882
1883
1884 listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
1885 req_msg->service_id);
1886 if (!listen_cm_id_priv) {
1887 cm_cleanup_timewait(cm_id_priv->timewait_info);
1888 spin_unlock_irq(&cm.lock);
1889 cm_issue_rej(work->port, work->mad_recv_wc,
1890 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1891 NULL, 0);
1892 goto out;
1893 }
1894 atomic_inc(&listen_cm_id_priv->refcount);
1895 atomic_inc(&cm_id_priv->refcount);
1896 cm_id_priv->id.state = IB_CM_REQ_RCVD;
1897 atomic_inc(&cm_id_priv->work_count);
1898 spin_unlock_irq(&cm.lock);
1899 out:
1900 return listen_cm_id_priv;
1901 }
1902
1903
1904
1905
1906
1907
1908 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
1909 {
1910 if (!cm_req_get_primary_subnet_local(req_msg)) {
1911 if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
1912 req_msg->primary_local_lid = ib_lid_be16(wc->slid);
1913 cm_req_set_primary_sl(req_msg, wc->sl);
1914 }
1915
1916 if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
1917 req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
1918 }
1919
1920 if (!cm_req_get_alt_subnet_local(req_msg)) {
1921 if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
1922 req_msg->alt_local_lid = ib_lid_be16(wc->slid);
1923 cm_req_set_alt_sl(req_msg, wc->sl);
1924 }
1925
1926 if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
1927 req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
1928 }
1929 }
1930
1931 static int cm_req_handler(struct cm_work *work)
1932 {
1933 struct ib_cm_id *cm_id;
1934 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1935 struct cm_req_msg *req_msg;
1936 const struct ib_global_route *grh;
1937 const struct ib_gid_attr *gid_attr;
1938 int ret;
1939
1940 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1941
1942 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
1943 if (IS_ERR(cm_id))
1944 return PTR_ERR(cm_id);
1945
1946 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1947 cm_id_priv->id.remote_id = req_msg->local_comm_id;
1948 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1949 work->mad_recv_wc->recv_buf.grh,
1950 &cm_id_priv->av);
1951 if (ret)
1952 goto destroy;
1953 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1954 id.local_id);
1955 if (IS_ERR(cm_id_priv->timewait_info)) {
1956 ret = PTR_ERR(cm_id_priv->timewait_info);
1957 goto destroy;
1958 }
1959 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1960 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1961 cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1962
1963 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1964 if (!listen_cm_id_priv) {
1965 pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
1966 be32_to_cpu(cm_id->local_id));
1967 ret = -EINVAL;
1968 goto free_timeinfo;
1969 }
1970
1971 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1972 cm_id_priv->id.context = listen_cm_id_priv->id.context;
1973 cm_id_priv->id.service_id = req_msg->service_id;
1974 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
1975
1976 cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
1977
1978 memset(&work->path[0], 0, sizeof(work->path[0]));
1979 if (cm_req_has_alt_path(req_msg))
1980 memset(&work->path[1], 0, sizeof(work->path[1]));
1981 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
1982 gid_attr = grh->sgid_attr;
1983
1984 if (gid_attr &&
1985 rdma_protocol_roce(work->port->cm_dev->ib_device,
1986 work->port->port_num)) {
1987 work->path[0].rec_type =
1988 sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
1989 } else {
1990 cm_path_set_rec_type(work->port->cm_dev->ib_device,
1991 work->port->port_num,
1992 &work->path[0],
1993 &req_msg->primary_local_gid);
1994 }
1995 if (cm_req_has_alt_path(req_msg))
1996 work->path[1].rec_type = work->path[0].rec_type;
1997 cm_format_paths_from_req(req_msg, &work->path[0],
1998 &work->path[1]);
1999 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
2000 sa_path_set_dmac(&work->path[0],
2001 cm_id_priv->av.ah_attr.roce.dmac);
2002 work->path[0].hop_limit = grh->hop_limit;
2003 ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
2004 cm_id_priv);
2005 if (ret) {
2006 int err;
2007
2008 err = rdma_query_gid(work->port->cm_dev->ib_device,
2009 work->port->port_num, 0,
2010 &work->path[0].sgid);
2011 if (err)
2012 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
2013 NULL, 0, NULL, 0);
2014 else
2015 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
2016 &work->path[0].sgid,
2017 sizeof(work->path[0].sgid),
2018 NULL, 0);
2019 goto rejected;
2020 }
2021 if (cm_req_has_alt_path(req_msg)) {
2022 ret = cm_init_av_by_path(&work->path[1], NULL,
2023 &cm_id_priv->alt_av, cm_id_priv);
2024 if (ret) {
2025 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
2026 &work->path[0].sgid,
2027 sizeof(work->path[0].sgid), NULL, 0);
2028 goto rejected;
2029 }
2030 }
2031 cm_id_priv->tid = req_msg->hdr.tid;
2032 cm_id_priv->timeout_ms = cm_convert_to_ms(
2033 cm_req_get_local_resp_timeout(req_msg));
2034 cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
2035 cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
2036 cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
2037 cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
2038 cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
2039 cm_id_priv->pkey = req_msg->pkey;
2040 cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
2041 cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
2042 cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
2043 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2044
2045 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2046 cm_process_work(cm_id_priv, work);
2047 cm_deref_id(listen_cm_id_priv);
2048 return 0;
2049
2050 rejected:
2051 atomic_dec(&cm_id_priv->refcount);
2052 cm_deref_id(listen_cm_id_priv);
2053 free_timeinfo:
2054 kfree(cm_id_priv->timewait_info);
2055 destroy:
2056 ib_destroy_cm_id(cm_id);
2057 return ret;
2058 }
2059
2060 static void cm_format_rep(struct cm_rep_msg *rep_msg,
2061 struct cm_id_private *cm_id_priv,
2062 struct ib_cm_rep_param *param)
2063 {
2064 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
2065 rep_msg->local_comm_id = cm_id_priv->id.local_id;
2066 rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2067 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
2068 rep_msg->resp_resources = param->responder_resources;
2069 cm_rep_set_target_ack_delay(rep_msg,
2070 cm_id_priv->av.port->cm_dev->ack_delay);
2071 cm_rep_set_failover(rep_msg, param->failover_accepted);
2072 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
2073 rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
2074
2075 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2076 rep_msg->initiator_depth = param->initiator_depth;
2077 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
2078 cm_rep_set_srq(rep_msg, param->srq);
2079 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
2080 } else {
2081 cm_rep_set_srq(rep_msg, 1);
2082 cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
2083 }
2084
2085 if (param->private_data && param->private_data_len)
2086 memcpy(rep_msg->private_data, param->private_data,
2087 param->private_data_len);
2088 }
2089
2090 int ib_send_cm_rep(struct ib_cm_id *cm_id,
2091 struct ib_cm_rep_param *param)
2092 {
2093 struct cm_id_private *cm_id_priv;
2094 struct ib_mad_send_buf *msg;
2095 struct cm_rep_msg *rep_msg;
2096 unsigned long flags;
2097 int ret;
2098
2099 if (param->private_data &&
2100 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2101 return -EINVAL;
2102
2103 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2104 spin_lock_irqsave(&cm_id_priv->lock, flags);
2105 if (cm_id->state != IB_CM_REQ_RCVD &&
2106 cm_id->state != IB_CM_MRA_REQ_SENT) {
2107 pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
2108 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2109 ret = -EINVAL;
2110 goto out;
2111 }
2112
2113 ret = cm_alloc_msg(cm_id_priv, &msg);
2114 if (ret)
2115 goto out;
2116
2117 rep_msg = (struct cm_rep_msg *) msg->mad;
2118 cm_format_rep(rep_msg, cm_id_priv, param);
2119 msg->timeout_ms = cm_id_priv->timeout_ms;
2120 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2121
2122 ret = ib_post_send_mad(msg, NULL);
2123 if (ret) {
2124 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2125 cm_free_msg(msg);
2126 return ret;
2127 }
2128
2129 cm_id->state = IB_CM_REP_SENT;
2130 cm_id_priv->msg = msg;
2131 cm_id_priv->initiator_depth = param->initiator_depth;
2132 cm_id_priv->responder_resources = param->responder_resources;
2133 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
2134 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2135
2136 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2137 return ret;
2138 }
2139 EXPORT_SYMBOL(ib_send_cm_rep);
2140
2141 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2142 struct cm_id_private *cm_id_priv,
2143 const void *private_data,
2144 u8 private_data_len)
2145 {
2146 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2147 rtu_msg->local_comm_id = cm_id_priv->id.local_id;
2148 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
2149
2150 if (private_data && private_data_len)
2151 memcpy(rtu_msg->private_data, private_data, private_data_len);
2152 }
2153
2154 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2155 const void *private_data,
2156 u8 private_data_len)
2157 {
2158 struct cm_id_private *cm_id_priv;
2159 struct ib_mad_send_buf *msg;
2160 unsigned long flags;
2161 void *data;
2162 int ret;
2163
2164 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2165 return -EINVAL;
2166
2167 data = cm_copy_private_data(private_data, private_data_len);
2168 if (IS_ERR(data))
2169 return PTR_ERR(data);
2170
2171 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2172 spin_lock_irqsave(&cm_id_priv->lock, flags);
2173 if (cm_id->state != IB_CM_REP_RCVD &&
2174 cm_id->state != IB_CM_MRA_REP_SENT) {
2175 pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
2176 be32_to_cpu(cm_id->local_id), cm_id->state);
2177 ret = -EINVAL;
2178 goto error;
2179 }
2180
2181 ret = cm_alloc_msg(cm_id_priv, &msg);
2182 if (ret)
2183 goto error;
2184
2185 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2186 private_data, private_data_len);
2187
2188 ret = ib_post_send_mad(msg, NULL);
2189 if (ret) {
2190 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2191 cm_free_msg(msg);
2192 kfree(data);
2193 return ret;
2194 }
2195
2196 cm_id->state = IB_CM_ESTABLISHED;
2197 cm_set_private_data(cm_id_priv, data, private_data_len);
2198 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2199 return 0;
2200
2201 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2202 kfree(data);
2203 return ret;
2204 }
2205 EXPORT_SYMBOL(ib_send_cm_rtu);
2206
2207 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2208 {
2209 struct cm_rep_msg *rep_msg;
2210 struct ib_cm_rep_event_param *param;
2211
2212 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2213 param = &work->cm_event.param.rep_rcvd;
2214 param->remote_ca_guid = rep_msg->local_ca_guid;
2215 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
2216 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2217 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
2218 param->responder_resources = rep_msg->initiator_depth;
2219 param->initiator_depth = rep_msg->resp_resources;
2220 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2221 param->failover_accepted = cm_rep_get_failover(rep_msg);
2222 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
2223 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2224 param->srq = cm_rep_get_srq(rep_msg);
2225 work->cm_event.private_data = &rep_msg->private_data;
2226 }
2227
2228 static void cm_dup_rep_handler(struct cm_work *work)
2229 {
2230 struct cm_id_private *cm_id_priv;
2231 struct cm_rep_msg *rep_msg;
2232 struct ib_mad_send_buf *msg = NULL;
2233 int ret;
2234
2235 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2236 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
2237 rep_msg->local_comm_id);
2238 if (!cm_id_priv)
2239 return;
2240
2241 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2242 counter[CM_REP_COUNTER]);
2243 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2244 if (ret)
2245 goto deref;
2246
2247 spin_lock_irq(&cm_id_priv->lock);
2248 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2249 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2250 cm_id_priv->private_data,
2251 cm_id_priv->private_data_len);
2252 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2253 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2254 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2255 cm_id_priv->private_data,
2256 cm_id_priv->private_data_len);
2257 else
2258 goto unlock;
2259 spin_unlock_irq(&cm_id_priv->lock);
2260
2261 ret = ib_post_send_mad(msg, NULL);
2262 if (ret)
2263 goto free;
2264 goto deref;
2265
2266 unlock: spin_unlock_irq(&cm_id_priv->lock);
2267 free: cm_free_msg(msg);
2268 deref: cm_deref_id(cm_id_priv);
2269 }
2270
2271 static int cm_rep_handler(struct cm_work *work)
2272 {
2273 struct cm_id_private *cm_id_priv;
2274 struct cm_rep_msg *rep_msg;
2275 int ret;
2276 struct cm_id_private *cur_cm_id_priv;
2277 struct ib_cm_id *cm_id;
2278 struct cm_timewait_info *timewait_info;
2279
2280 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2281 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
2282 if (!cm_id_priv) {
2283 cm_dup_rep_handler(work);
2284 pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
2285 be32_to_cpu(rep_msg->remote_comm_id));
2286 return -EINVAL;
2287 }
2288
2289 cm_format_rep_event(work, cm_id_priv->qp_type);
2290
2291 spin_lock_irq(&cm_id_priv->lock);
2292 switch (cm_id_priv->id.state) {
2293 case IB_CM_REQ_SENT:
2294 case IB_CM_MRA_REQ_RCVD:
2295 break;
2296 default:
2297 spin_unlock_irq(&cm_id_priv->lock);
2298 ret = -EINVAL;
2299 pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
2300 __func__, cm_id_priv->id.state,
2301 be32_to_cpu(rep_msg->local_comm_id),
2302 be32_to_cpu(rep_msg->remote_comm_id));
2303 goto error;
2304 }
2305
2306 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
2307 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
2308 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2309
2310 spin_lock(&cm.lock);
2311
2312 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2313 spin_unlock(&cm.lock);
2314 spin_unlock_irq(&cm_id_priv->lock);
2315 ret = -EINVAL;
2316 pr_debug("%s: Failed to insert remote id %d\n", __func__,
2317 be32_to_cpu(rep_msg->remote_comm_id));
2318 goto error;
2319 }
2320
2321 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2322 if (timewait_info) {
2323 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2324 &cm.remote_id_table);
2325 cm_id_priv->timewait_info->inserted_remote_id = 0;
2326 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
2327 timewait_info->work.remote_id);
2328
2329 spin_unlock(&cm.lock);
2330 spin_unlock_irq(&cm_id_priv->lock);
2331 cm_issue_rej(work->port, work->mad_recv_wc,
2332 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2333 NULL, 0);
2334 ret = -EINVAL;
2335 pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
2336 __func__, be32_to_cpu(rep_msg->local_comm_id),
2337 be32_to_cpu(rep_msg->remote_comm_id));
2338
2339 if (cur_cm_id_priv) {
2340 cm_id = &cur_cm_id_priv->id;
2341 ib_send_cm_dreq(cm_id, NULL, 0);
2342 cm_deref_id(cur_cm_id_priv);
2343 }
2344
2345 goto error;
2346 }
2347 spin_unlock(&cm.lock);
2348
2349 cm_id_priv->id.state = IB_CM_REP_RCVD;
2350 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2351 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2352 cm_id_priv->initiator_depth = rep_msg->resp_resources;
2353 cm_id_priv->responder_resources = rep_msg->initiator_depth;
2354 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2355 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2356 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2357 cm_id_priv->av.timeout =
2358 cm_ack_timeout(cm_id_priv->target_ack_delay,
2359 cm_id_priv->av.timeout - 1);
2360 cm_id_priv->alt_av.timeout =
2361 cm_ack_timeout(cm_id_priv->target_ack_delay,
2362 cm_id_priv->alt_av.timeout - 1);
2363
2364
2365
2366 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2367 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2368 if (!ret)
2369 list_add_tail(&work->list, &cm_id_priv->work_list);
2370 spin_unlock_irq(&cm_id_priv->lock);
2371
2372 if (ret)
2373 cm_process_work(cm_id_priv, work);
2374 else
2375 cm_deref_id(cm_id_priv);
2376 return 0;
2377
2378 error:
2379 cm_deref_id(cm_id_priv);
2380 return ret;
2381 }
2382
2383 static int cm_establish_handler(struct cm_work *work)
2384 {
2385 struct cm_id_private *cm_id_priv;
2386 int ret;
2387
2388
2389 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2390 if (!cm_id_priv)
2391 return -EINVAL;
2392
2393 spin_lock_irq(&cm_id_priv->lock);
2394 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2395 spin_unlock_irq(&cm_id_priv->lock);
2396 goto out;
2397 }
2398
2399 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2400 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2401 if (!ret)
2402 list_add_tail(&work->list, &cm_id_priv->work_list);
2403 spin_unlock_irq(&cm_id_priv->lock);
2404
2405 if (ret)
2406 cm_process_work(cm_id_priv, work);
2407 else
2408 cm_deref_id(cm_id_priv);
2409 return 0;
2410 out:
2411 cm_deref_id(cm_id_priv);
2412 return -EINVAL;
2413 }
2414
2415 static int cm_rtu_handler(struct cm_work *work)
2416 {
2417 struct cm_id_private *cm_id_priv;
2418 struct cm_rtu_msg *rtu_msg;
2419 int ret;
2420
2421 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2422 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2423 rtu_msg->local_comm_id);
2424 if (!cm_id_priv)
2425 return -EINVAL;
2426
2427 work->cm_event.private_data = &rtu_msg->private_data;
2428
2429 spin_lock_irq(&cm_id_priv->lock);
2430 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2431 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2432 spin_unlock_irq(&cm_id_priv->lock);
2433 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2434 counter[CM_RTU_COUNTER]);
2435 goto out;
2436 }
2437 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2438
2439 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2440 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2441 if (!ret)
2442 list_add_tail(&work->list, &cm_id_priv->work_list);
2443 spin_unlock_irq(&cm_id_priv->lock);
2444
2445 if (ret)
2446 cm_process_work(cm_id_priv, work);
2447 else
2448 cm_deref_id(cm_id_priv);
2449 return 0;
2450 out:
2451 cm_deref_id(cm_id_priv);
2452 return -EINVAL;
2453 }
2454
2455 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2456 struct cm_id_private *cm_id_priv,
2457 const void *private_data,
2458 u8 private_data_len)
2459 {
2460 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2461 cm_form_tid(cm_id_priv));
2462 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2463 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2464 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2465
2466 if (private_data && private_data_len)
2467 memcpy(dreq_msg->private_data, private_data, private_data_len);
2468 }
2469
2470 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2471 const void *private_data,
2472 u8 private_data_len)
2473 {
2474 struct cm_id_private *cm_id_priv;
2475 struct ib_mad_send_buf *msg;
2476 unsigned long flags;
2477 int ret;
2478
2479 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2480 return -EINVAL;
2481
2482 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2483 spin_lock_irqsave(&cm_id_priv->lock, flags);
2484 if (cm_id->state != IB_CM_ESTABLISHED) {
2485 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2486 be32_to_cpu(cm_id->local_id), cm_id->state);
2487 ret = -EINVAL;
2488 goto out;
2489 }
2490
2491 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2492 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2493 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2494
2495 ret = cm_alloc_msg(cm_id_priv, &msg);
2496 if (ret) {
2497 cm_enter_timewait(cm_id_priv);
2498 goto out;
2499 }
2500
2501 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2502 private_data, private_data_len);
2503 msg->timeout_ms = cm_id_priv->timeout_ms;
2504 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2505
2506 ret = ib_post_send_mad(msg, NULL);
2507 if (ret) {
2508 cm_enter_timewait(cm_id_priv);
2509 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2510 cm_free_msg(msg);
2511 return ret;
2512 }
2513
2514 cm_id->state = IB_CM_DREQ_SENT;
2515 cm_id_priv->msg = msg;
2516 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2517 return ret;
2518 }
2519 EXPORT_SYMBOL(ib_send_cm_dreq);
2520
2521 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2522 struct cm_id_private *cm_id_priv,
2523 const void *private_data,
2524 u8 private_data_len)
2525 {
2526 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2527 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2528 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2529
2530 if (private_data && private_data_len)
2531 memcpy(drep_msg->private_data, private_data, private_data_len);
2532 }
2533
2534 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2535 const void *private_data,
2536 u8 private_data_len)
2537 {
2538 struct cm_id_private *cm_id_priv;
2539 struct ib_mad_send_buf *msg;
2540 unsigned long flags;
2541 void *data;
2542 int ret;
2543
2544 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2545 return -EINVAL;
2546
2547 data = cm_copy_private_data(private_data, private_data_len);
2548 if (IS_ERR(data))
2549 return PTR_ERR(data);
2550
2551 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2552 spin_lock_irqsave(&cm_id_priv->lock, flags);
2553 if (cm_id->state != IB_CM_DREQ_RCVD) {
2554 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2555 kfree(data);
2556 pr_debug("%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
2557 __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
2558 return -EINVAL;
2559 }
2560
2561 cm_set_private_data(cm_id_priv, data, private_data_len);
2562 cm_enter_timewait(cm_id_priv);
2563
2564 ret = cm_alloc_msg(cm_id_priv, &msg);
2565 if (ret)
2566 goto out;
2567
2568 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2569 private_data, private_data_len);
2570
2571 ret = ib_post_send_mad(msg, NULL);
2572 if (ret) {
2573 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2574 cm_free_msg(msg);
2575 return ret;
2576 }
2577
2578 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2579 return ret;
2580 }
2581 EXPORT_SYMBOL(ib_send_cm_drep);
2582
2583 static int cm_issue_drep(struct cm_port *port,
2584 struct ib_mad_recv_wc *mad_recv_wc)
2585 {
2586 struct ib_mad_send_buf *msg = NULL;
2587 struct cm_dreq_msg *dreq_msg;
2588 struct cm_drep_msg *drep_msg;
2589 int ret;
2590
2591 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2592 if (ret)
2593 return ret;
2594
2595 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2596 drep_msg = (struct cm_drep_msg *) msg->mad;
2597
2598 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2599 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2600 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2601
2602 ret = ib_post_send_mad(msg, NULL);
2603 if (ret)
2604 cm_free_msg(msg);
2605
2606 return ret;
2607 }
2608
2609 static int cm_dreq_handler(struct cm_work *work)
2610 {
2611 struct cm_id_private *cm_id_priv;
2612 struct cm_dreq_msg *dreq_msg;
2613 struct ib_mad_send_buf *msg = NULL;
2614 int ret;
2615
2616 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2617 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2618 dreq_msg->local_comm_id);
2619 if (!cm_id_priv) {
2620 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2621 counter[CM_DREQ_COUNTER]);
2622 cm_issue_drep(work->port, work->mad_recv_wc);
2623 pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
2624 __func__, be32_to_cpu(dreq_msg->local_comm_id),
2625 be32_to_cpu(dreq_msg->remote_comm_id));
2626 return -EINVAL;
2627 }
2628
2629 work->cm_event.private_data = &dreq_msg->private_data;
2630
2631 spin_lock_irq(&cm_id_priv->lock);
2632 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2633 goto unlock;
2634
2635 switch (cm_id_priv->id.state) {
2636 case IB_CM_REP_SENT:
2637 case IB_CM_DREQ_SENT:
2638 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2639 break;
2640 case IB_CM_ESTABLISHED:
2641 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2642 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2643 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2644 break;
2645 case IB_CM_MRA_REP_RCVD:
2646 break;
2647 case IB_CM_TIMEWAIT:
2648 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2649 counter[CM_DREQ_COUNTER]);
2650 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2651 if (IS_ERR(msg))
2652 goto unlock;
2653
2654 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2655 cm_id_priv->private_data,
2656 cm_id_priv->private_data_len);
2657 spin_unlock_irq(&cm_id_priv->lock);
2658
2659 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2660 ib_post_send_mad(msg, NULL))
2661 cm_free_msg(msg);
2662 goto deref;
2663 case IB_CM_DREQ_RCVD:
2664 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2665 counter[CM_DREQ_COUNTER]);
2666 goto unlock;
2667 default:
2668 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2669 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2670 cm_id_priv->id.state);
2671 goto unlock;
2672 }
2673 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2674 cm_id_priv->tid = dreq_msg->hdr.tid;
2675 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2676 if (!ret)
2677 list_add_tail(&work->list, &cm_id_priv->work_list);
2678 spin_unlock_irq(&cm_id_priv->lock);
2679
2680 if (ret)
2681 cm_process_work(cm_id_priv, work);
2682 else
2683 cm_deref_id(cm_id_priv);
2684 return 0;
2685
2686 unlock: spin_unlock_irq(&cm_id_priv->lock);
2687 deref: cm_deref_id(cm_id_priv);
2688 return -EINVAL;
2689 }
2690
2691 static int cm_drep_handler(struct cm_work *work)
2692 {
2693 struct cm_id_private *cm_id_priv;
2694 struct cm_drep_msg *drep_msg;
2695 int ret;
2696
2697 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2698 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2699 drep_msg->local_comm_id);
2700 if (!cm_id_priv)
2701 return -EINVAL;
2702
2703 work->cm_event.private_data = &drep_msg->private_data;
2704
2705 spin_lock_irq(&cm_id_priv->lock);
2706 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2707 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2708 spin_unlock_irq(&cm_id_priv->lock);
2709 goto out;
2710 }
2711 cm_enter_timewait(cm_id_priv);
2712
2713 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2714 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2715 if (!ret)
2716 list_add_tail(&work->list, &cm_id_priv->work_list);
2717 spin_unlock_irq(&cm_id_priv->lock);
2718
2719 if (ret)
2720 cm_process_work(cm_id_priv, work);
2721 else
2722 cm_deref_id(cm_id_priv);
2723 return 0;
2724 out:
2725 cm_deref_id(cm_id_priv);
2726 return -EINVAL;
2727 }
2728
2729 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2730 enum ib_cm_rej_reason reason,
2731 void *ari,
2732 u8 ari_length,
2733 const void *private_data,
2734 u8 private_data_len)
2735 {
2736 struct cm_id_private *cm_id_priv;
2737 struct ib_mad_send_buf *msg;
2738 unsigned long flags;
2739 int ret;
2740
2741 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2742 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2743 return -EINVAL;
2744
2745 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2746
2747 spin_lock_irqsave(&cm_id_priv->lock, flags);
2748 switch (cm_id->state) {
2749 case IB_CM_REQ_SENT:
2750 case IB_CM_MRA_REQ_RCVD:
2751 case IB_CM_REQ_RCVD:
2752 case IB_CM_MRA_REQ_SENT:
2753 case IB_CM_REP_RCVD:
2754 case IB_CM_MRA_REP_SENT:
2755 ret = cm_alloc_msg(cm_id_priv, &msg);
2756 if (!ret)
2757 cm_format_rej((struct cm_rej_msg *) msg->mad,
2758 cm_id_priv, reason, ari, ari_length,
2759 private_data, private_data_len);
2760
2761 cm_reset_to_idle(cm_id_priv);
2762 break;
2763 case IB_CM_REP_SENT:
2764 case IB_CM_MRA_REP_RCVD:
2765 ret = cm_alloc_msg(cm_id_priv, &msg);
2766 if (!ret)
2767 cm_format_rej((struct cm_rej_msg *) msg->mad,
2768 cm_id_priv, reason, ari, ari_length,
2769 private_data, private_data_len);
2770
2771 cm_enter_timewait(cm_id_priv);
2772 break;
2773 default:
2774 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2775 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2776 ret = -EINVAL;
2777 goto out;
2778 }
2779
2780 if (ret)
2781 goto out;
2782
2783 ret = ib_post_send_mad(msg, NULL);
2784 if (ret)
2785 cm_free_msg(msg);
2786
2787 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2788 return ret;
2789 }
2790 EXPORT_SYMBOL(ib_send_cm_rej);
2791
2792 static void cm_format_rej_event(struct cm_work *work)
2793 {
2794 struct cm_rej_msg *rej_msg;
2795 struct ib_cm_rej_event_param *param;
2796
2797 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2798 param = &work->cm_event.param.rej_rcvd;
2799 param->ari = rej_msg->ari;
2800 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2801 param->reason = __be16_to_cpu(rej_msg->reason);
2802 work->cm_event.private_data = &rej_msg->private_data;
2803 }
2804
2805 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2806 {
2807 struct cm_timewait_info *timewait_info;
2808 struct cm_id_private *cm_id_priv;
2809 __be32 remote_id;
2810
2811 remote_id = rej_msg->local_comm_id;
2812
2813 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2814 spin_lock_irq(&cm.lock);
2815 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2816 remote_id);
2817 if (!timewait_info) {
2818 spin_unlock_irq(&cm.lock);
2819 return NULL;
2820 }
2821 cm_id_priv = xa_load(&cm.local_id_table,
2822 cm_local_id(timewait_info->work.local_id));
2823 if (cm_id_priv) {
2824 if (cm_id_priv->id.remote_id == remote_id)
2825 atomic_inc(&cm_id_priv->refcount);
2826 else
2827 cm_id_priv = NULL;
2828 }
2829 spin_unlock_irq(&cm.lock);
2830 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2831 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2832 else
2833 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2834
2835 return cm_id_priv;
2836 }
2837
2838 static int cm_rej_handler(struct cm_work *work)
2839 {
2840 struct cm_id_private *cm_id_priv;
2841 struct cm_rej_msg *rej_msg;
2842 int ret;
2843
2844 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2845 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2846 if (!cm_id_priv)
2847 return -EINVAL;
2848
2849 cm_format_rej_event(work);
2850
2851 spin_lock_irq(&cm_id_priv->lock);
2852 switch (cm_id_priv->id.state) {
2853 case IB_CM_REQ_SENT:
2854 case IB_CM_MRA_REQ_RCVD:
2855 case IB_CM_REP_SENT:
2856 case IB_CM_MRA_REP_RCVD:
2857 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2858
2859 case IB_CM_REQ_RCVD:
2860 case IB_CM_MRA_REQ_SENT:
2861 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2862 cm_enter_timewait(cm_id_priv);
2863 else
2864 cm_reset_to_idle(cm_id_priv);
2865 break;
2866 case IB_CM_DREQ_SENT:
2867 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2868
2869 case IB_CM_REP_RCVD:
2870 case IB_CM_MRA_REP_SENT:
2871 cm_enter_timewait(cm_id_priv);
2872 break;
2873 case IB_CM_ESTABLISHED:
2874 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2875 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2876 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2877 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2878 cm_id_priv->msg);
2879 cm_enter_timewait(cm_id_priv);
2880 break;
2881 }
2882
2883 default:
2884 spin_unlock_irq(&cm_id_priv->lock);
2885 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2886 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2887 cm_id_priv->id.state);
2888 ret = -EINVAL;
2889 goto out;
2890 }
2891
2892 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2893 if (!ret)
2894 list_add_tail(&work->list, &cm_id_priv->work_list);
2895 spin_unlock_irq(&cm_id_priv->lock);
2896
2897 if (ret)
2898 cm_process_work(cm_id_priv, work);
2899 else
2900 cm_deref_id(cm_id_priv);
2901 return 0;
2902 out:
2903 cm_deref_id(cm_id_priv);
2904 return -EINVAL;
2905 }
2906
2907 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2908 u8 service_timeout,
2909 const void *private_data,
2910 u8 private_data_len)
2911 {
2912 struct cm_id_private *cm_id_priv;
2913 struct ib_mad_send_buf *msg;
2914 enum ib_cm_state cm_state;
2915 enum ib_cm_lap_state lap_state;
2916 enum cm_msg_response msg_response;
2917 void *data;
2918 unsigned long flags;
2919 int ret;
2920
2921 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2922 return -EINVAL;
2923
2924 data = cm_copy_private_data(private_data, private_data_len);
2925 if (IS_ERR(data))
2926 return PTR_ERR(data);
2927
2928 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2929
2930 spin_lock_irqsave(&cm_id_priv->lock, flags);
2931 switch(cm_id_priv->id.state) {
2932 case IB_CM_REQ_RCVD:
2933 cm_state = IB_CM_MRA_REQ_SENT;
2934 lap_state = cm_id->lap_state;
2935 msg_response = CM_MSG_RESPONSE_REQ;
2936 break;
2937 case IB_CM_REP_RCVD:
2938 cm_state = IB_CM_MRA_REP_SENT;
2939 lap_state = cm_id->lap_state;
2940 msg_response = CM_MSG_RESPONSE_REP;
2941 break;
2942 case IB_CM_ESTABLISHED:
2943 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2944 cm_state = cm_id->state;
2945 lap_state = IB_CM_MRA_LAP_SENT;
2946 msg_response = CM_MSG_RESPONSE_OTHER;
2947 break;
2948 }
2949
2950 default:
2951 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2952 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2953 cm_id_priv->id.state);
2954 ret = -EINVAL;
2955 goto error1;
2956 }
2957
2958 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2959 ret = cm_alloc_msg(cm_id_priv, &msg);
2960 if (ret)
2961 goto error1;
2962
2963 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2964 msg_response, service_timeout,
2965 private_data, private_data_len);
2966 ret = ib_post_send_mad(msg, NULL);
2967 if (ret)
2968 goto error2;
2969 }
2970
2971 cm_id->state = cm_state;
2972 cm_id->lap_state = lap_state;
2973 cm_id_priv->service_timeout = service_timeout;
2974 cm_set_private_data(cm_id_priv, data, private_data_len);
2975 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2976 return 0;
2977
2978 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2979 kfree(data);
2980 return ret;
2981
2982 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2983 kfree(data);
2984 cm_free_msg(msg);
2985 return ret;
2986 }
2987 EXPORT_SYMBOL(ib_send_cm_mra);
2988
2989 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2990 {
2991 switch (cm_mra_get_msg_mraed(mra_msg)) {
2992 case CM_MSG_RESPONSE_REQ:
2993 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2994 case CM_MSG_RESPONSE_REP:
2995 case CM_MSG_RESPONSE_OTHER:
2996 return cm_acquire_id(mra_msg->remote_comm_id,
2997 mra_msg->local_comm_id);
2998 default:
2999 return NULL;
3000 }
3001 }
3002
3003 static int cm_mra_handler(struct cm_work *work)
3004 {
3005 struct cm_id_private *cm_id_priv;
3006 struct cm_mra_msg *mra_msg;
3007 int timeout, ret;
3008
3009 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3010 cm_id_priv = cm_acquire_mraed_id(mra_msg);
3011 if (!cm_id_priv)
3012 return -EINVAL;
3013
3014 work->cm_event.private_data = &mra_msg->private_data;
3015 work->cm_event.param.mra_rcvd.service_timeout =
3016 cm_mra_get_service_timeout(mra_msg);
3017 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
3018 cm_convert_to_ms(cm_id_priv->av.timeout);
3019
3020 spin_lock_irq(&cm_id_priv->lock);
3021 switch (cm_id_priv->id.state) {
3022 case IB_CM_REQ_SENT:
3023 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
3024 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3025 cm_id_priv->msg, timeout))
3026 goto out;
3027 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3028 break;
3029 case IB_CM_REP_SENT:
3030 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
3031 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3032 cm_id_priv->msg, timeout))
3033 goto out;
3034 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3035 break;
3036 case IB_CM_ESTABLISHED:
3037 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
3038 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3039 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3040 cm_id_priv->msg, timeout)) {
3041 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3042 atomic_long_inc(&work->port->
3043 counter_group[CM_RECV_DUPLICATES].
3044 counter[CM_MRA_COUNTER]);
3045 goto out;
3046 }
3047 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3048 break;
3049 case IB_CM_MRA_REQ_RCVD:
3050 case IB_CM_MRA_REP_RCVD:
3051 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3052 counter[CM_MRA_COUNTER]);
3053
3054 default:
3055 pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
3056 __func__, be32_to_cpu(cm_id_priv->id.local_id),
3057 cm_id_priv->id.state);
3058 goto out;
3059 }
3060
3061 cm_id_priv->msg->context[1] = (void *) (unsigned long)
3062 cm_id_priv->id.state;
3063 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3064 if (!ret)
3065 list_add_tail(&work->list, &cm_id_priv->work_list);
3066 spin_unlock_irq(&cm_id_priv->lock);
3067
3068 if (ret)
3069 cm_process_work(cm_id_priv, work);
3070 else
3071 cm_deref_id(cm_id_priv);
3072 return 0;
3073 out:
3074 spin_unlock_irq(&cm_id_priv->lock);
3075 cm_deref_id(cm_id_priv);
3076 return -EINVAL;
3077 }
3078
3079 static void cm_format_lap(struct cm_lap_msg *lap_msg,
3080 struct cm_id_private *cm_id_priv,
3081 struct sa_path_rec *alternate_path,
3082 const void *private_data,
3083 u8 private_data_len)
3084 {
3085 bool alt_ext = false;
3086
3087 if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA)
3088 alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
3089 alternate_path->opa.slid);
3090 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
3091 cm_form_tid(cm_id_priv));
3092 lap_msg->local_comm_id = cm_id_priv->id.local_id;
3093 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
3094 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
3095
3096 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
3097 lap_msg->alt_local_lid =
3098 htons(ntohl(sa_path_get_slid(alternate_path)));
3099 lap_msg->alt_remote_lid =
3100 htons(ntohl(sa_path_get_dlid(alternate_path)));
3101 lap_msg->alt_local_gid = alternate_path->sgid;
3102 lap_msg->alt_remote_gid = alternate_path->dgid;
3103 if (alt_ext) {
3104 lap_msg->alt_local_gid.global.interface_id
3105 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid));
3106 lap_msg->alt_remote_gid.global.interface_id
3107 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid));
3108 }
3109 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
3110 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
3111 lap_msg->alt_hop_limit = alternate_path->hop_limit;
3112 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
3113 cm_lap_set_sl(lap_msg, alternate_path->sl);
3114 cm_lap_set_subnet_local(lap_msg, 1);
3115 cm_lap_set_local_ack_timeout(lap_msg,
3116 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
3117 alternate_path->packet_life_time));
3118
3119 if (private_data && private_data_len)
3120 memcpy(lap_msg->private_data, private_data, private_data_len);
3121 }
3122
3123 int ib_send_cm_lap(struct ib_cm_id *cm_id,
3124 struct sa_path_rec *alternate_path,
3125 const void *private_data,
3126 u8 private_data_len)
3127 {
3128 struct cm_id_private *cm_id_priv;
3129 struct ib_mad_send_buf *msg;
3130 unsigned long flags;
3131 int ret;
3132
3133 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
3134 return -EINVAL;
3135
3136 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3137 spin_lock_irqsave(&cm_id_priv->lock, flags);
3138 if (cm_id->state != IB_CM_ESTABLISHED ||
3139 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
3140 cm_id->lap_state != IB_CM_LAP_IDLE)) {
3141 ret = -EINVAL;
3142 goto out;
3143 }
3144
3145 ret = cm_init_av_by_path(alternate_path, NULL, &cm_id_priv->alt_av,
3146 cm_id_priv);
3147 if (ret)
3148 goto out;
3149 cm_id_priv->alt_av.timeout =
3150 cm_ack_timeout(cm_id_priv->target_ack_delay,
3151 cm_id_priv->alt_av.timeout - 1);
3152
3153 ret = cm_alloc_msg(cm_id_priv, &msg);
3154 if (ret)
3155 goto out;
3156
3157 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
3158 alternate_path, private_data, private_data_len);
3159 msg->timeout_ms = cm_id_priv->timeout_ms;
3160 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
3161
3162 ret = ib_post_send_mad(msg, NULL);
3163 if (ret) {
3164 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3165 cm_free_msg(msg);
3166 return ret;
3167 }
3168
3169 cm_id->lap_state = IB_CM_LAP_SENT;
3170 cm_id_priv->msg = msg;
3171
3172 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3173 return ret;
3174 }
3175 EXPORT_SYMBOL(ib_send_cm_lap);
3176
3177 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3178 struct sa_path_rec *path)
3179 {
3180 u32 lid;
3181
3182 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3183 sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
3184 sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
3185 } else {
3186 lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
3187 sa_path_set_dlid(path, lid);
3188
3189 lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
3190 sa_path_set_slid(path, lid);
3191 }
3192 }
3193
3194 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3195 struct sa_path_rec *path,
3196 struct cm_lap_msg *lap_msg)
3197 {
3198 path->dgid = lap_msg->alt_local_gid;
3199 path->sgid = lap_msg->alt_remote_gid;
3200 path->flow_label = cm_lap_get_flow_label(lap_msg);
3201 path->hop_limit = lap_msg->alt_hop_limit;
3202 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
3203 path->reversible = 1;
3204 path->pkey = cm_id_priv->pkey;
3205 path->sl = cm_lap_get_sl(lap_msg);
3206 path->mtu_selector = IB_SA_EQ;
3207 path->mtu = cm_id_priv->path_mtu;
3208 path->rate_selector = IB_SA_EQ;
3209 path->rate = cm_lap_get_packet_rate(lap_msg);
3210 path->packet_life_time_selector = IB_SA_EQ;
3211 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
3212 path->packet_life_time -= (path->packet_life_time > 0);
3213 cm_format_path_lid_from_lap(lap_msg, path);
3214 }
3215
3216 static int cm_lap_handler(struct cm_work *work)
3217 {
3218 struct cm_id_private *cm_id_priv;
3219 struct cm_lap_msg *lap_msg;
3220 struct ib_cm_lap_event_param *param;
3221 struct ib_mad_send_buf *msg = NULL;
3222 int ret;
3223
3224
3225
3226
3227 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3228 work->port->port_num))
3229 return -EINVAL;
3230
3231
3232 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3233 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
3234 lap_msg->local_comm_id);
3235 if (!cm_id_priv)
3236 return -EINVAL;
3237
3238 param = &work->cm_event.param.lap_rcvd;
3239 memset(&work->path[0], 0, sizeof(work->path[1]));
3240 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3241 work->port->port_num,
3242 &work->path[0],
3243 &lap_msg->alt_local_gid);
3244 param->alternate_path = &work->path[0];
3245 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3246 work->cm_event.private_data = &lap_msg->private_data;
3247
3248 spin_lock_irq(&cm_id_priv->lock);
3249 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3250 goto unlock;
3251
3252 switch (cm_id_priv->id.lap_state) {
3253 case IB_CM_LAP_UNINIT:
3254 case IB_CM_LAP_IDLE:
3255 break;
3256 case IB_CM_MRA_LAP_SENT:
3257 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3258 counter[CM_LAP_COUNTER]);
3259 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3260 if (IS_ERR(msg))
3261 goto unlock;
3262
3263 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3264 CM_MSG_RESPONSE_OTHER,
3265 cm_id_priv->service_timeout,
3266 cm_id_priv->private_data,
3267 cm_id_priv->private_data_len);
3268 spin_unlock_irq(&cm_id_priv->lock);
3269
3270 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3271 ib_post_send_mad(msg, NULL))
3272 cm_free_msg(msg);
3273 goto deref;
3274 case IB_CM_LAP_RCVD:
3275 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3276 counter[CM_LAP_COUNTER]);
3277 goto unlock;
3278 default:
3279 goto unlock;
3280 }
3281
3282 ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3283 work->mad_recv_wc->recv_buf.grh,
3284 &cm_id_priv->av);
3285 if (ret)
3286 goto unlock;
3287
3288 ret = cm_init_av_by_path(param->alternate_path, NULL,
3289 &cm_id_priv->alt_av, cm_id_priv);
3290 if (ret)
3291 goto unlock;
3292
3293 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3294 cm_id_priv->tid = lap_msg->hdr.tid;
3295 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3296 if (!ret)
3297 list_add_tail(&work->list, &cm_id_priv->work_list);
3298 spin_unlock_irq(&cm_id_priv->lock);
3299
3300 if (ret)
3301 cm_process_work(cm_id_priv, work);
3302 else
3303 cm_deref_id(cm_id_priv);
3304 return 0;
3305
3306 unlock: spin_unlock_irq(&cm_id_priv->lock);
3307 deref: cm_deref_id(cm_id_priv);
3308 return -EINVAL;
3309 }
3310
3311 static void cm_format_apr(struct cm_apr_msg *apr_msg,
3312 struct cm_id_private *cm_id_priv,
3313 enum ib_cm_apr_status status,
3314 void *info,
3315 u8 info_length,
3316 const void *private_data,
3317 u8 private_data_len)
3318 {
3319 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
3320 apr_msg->local_comm_id = cm_id_priv->id.local_id;
3321 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
3322 apr_msg->ap_status = (u8) status;
3323
3324 if (info && info_length) {
3325 apr_msg->info_length = info_length;
3326 memcpy(apr_msg->info, info, info_length);
3327 }
3328
3329 if (private_data && private_data_len)
3330 memcpy(apr_msg->private_data, private_data, private_data_len);
3331 }
3332
3333 int ib_send_cm_apr(struct ib_cm_id *cm_id,
3334 enum ib_cm_apr_status status,
3335 void *info,
3336 u8 info_length,
3337 const void *private_data,
3338 u8 private_data_len)
3339 {
3340 struct cm_id_private *cm_id_priv;
3341 struct ib_mad_send_buf *msg;
3342 unsigned long flags;
3343 int ret;
3344
3345 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
3346 (info && info_length > IB_CM_APR_INFO_LENGTH))
3347 return -EINVAL;
3348
3349 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3350 spin_lock_irqsave(&cm_id_priv->lock, flags);
3351 if (cm_id->state != IB_CM_ESTABLISHED ||
3352 (cm_id->lap_state != IB_CM_LAP_RCVD &&
3353 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
3354 ret = -EINVAL;
3355 goto out;
3356 }
3357
3358 ret = cm_alloc_msg(cm_id_priv, &msg);
3359 if (ret)
3360 goto out;
3361
3362 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
3363 info, info_length, private_data, private_data_len);
3364 ret = ib_post_send_mad(msg, NULL);
3365 if (ret) {
3366 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3367 cm_free_msg(msg);
3368 return ret;
3369 }
3370
3371 cm_id->lap_state = IB_CM_LAP_IDLE;
3372 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3373 return ret;
3374 }
3375 EXPORT_SYMBOL(ib_send_cm_apr);
3376
3377 static int cm_apr_handler(struct cm_work *work)
3378 {
3379 struct cm_id_private *cm_id_priv;
3380 struct cm_apr_msg *apr_msg;
3381 int ret;
3382
3383
3384
3385
3386 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3387 work->port->port_num))
3388 return -EINVAL;
3389
3390 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3391 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
3392 apr_msg->local_comm_id);
3393 if (!cm_id_priv)
3394 return -EINVAL;
3395
3396 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
3397 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
3398 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
3399 work->cm_event.private_data = &apr_msg->private_data;
3400
3401 spin_lock_irq(&cm_id_priv->lock);
3402 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3403 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3404 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3405 spin_unlock_irq(&cm_id_priv->lock);
3406 goto out;
3407 }
3408 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3409 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3410 cm_id_priv->msg = NULL;
3411
3412 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3413 if (!ret)
3414 list_add_tail(&work->list, &cm_id_priv->work_list);
3415 spin_unlock_irq(&cm_id_priv->lock);
3416
3417 if (ret)
3418 cm_process_work(cm_id_priv, work);
3419 else
3420 cm_deref_id(cm_id_priv);
3421 return 0;
3422 out:
3423 cm_deref_id(cm_id_priv);
3424 return -EINVAL;
3425 }
3426
3427 static int cm_timewait_handler(struct cm_work *work)
3428 {
3429 struct cm_timewait_info *timewait_info;
3430 struct cm_id_private *cm_id_priv;
3431 int ret;
3432
3433 timewait_info = (struct cm_timewait_info *)work;
3434 spin_lock_irq(&cm.lock);
3435 list_del(&timewait_info->list);
3436 spin_unlock_irq(&cm.lock);
3437
3438 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3439 timewait_info->work.remote_id);
3440 if (!cm_id_priv)
3441 return -EINVAL;
3442
3443 spin_lock_irq(&cm_id_priv->lock);
3444 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3445 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3446 spin_unlock_irq(&cm_id_priv->lock);
3447 goto out;
3448 }
3449 cm_id_priv->id.state = IB_CM_IDLE;
3450 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3451 if (!ret)
3452 list_add_tail(&work->list, &cm_id_priv->work_list);
3453 spin_unlock_irq(&cm_id_priv->lock);
3454
3455 if (ret)
3456 cm_process_work(cm_id_priv, work);
3457 else
3458 cm_deref_id(cm_id_priv);
3459 return 0;
3460 out:
3461 cm_deref_id(cm_id_priv);
3462 return -EINVAL;
3463 }
3464
3465 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3466 struct cm_id_private *cm_id_priv,
3467 struct ib_cm_sidr_req_param *param)
3468 {
3469 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3470 cm_form_tid(cm_id_priv));
3471 sidr_req_msg->request_id = cm_id_priv->id.local_id;
3472 sidr_req_msg->pkey = param->path->pkey;
3473 sidr_req_msg->service_id = param->service_id;
3474
3475 if (param->private_data && param->private_data_len)
3476 memcpy(sidr_req_msg->private_data, param->private_data,
3477 param->private_data_len);
3478 }
3479
3480 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3481 struct ib_cm_sidr_req_param *param)
3482 {
3483 struct cm_id_private *cm_id_priv;
3484 struct ib_mad_send_buf *msg;
3485 unsigned long flags;
3486 int ret;
3487
3488 if (!param->path || (param->private_data &&
3489 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3490 return -EINVAL;
3491
3492 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3493 ret = cm_init_av_by_path(param->path, param->sgid_attr,
3494 &cm_id_priv->av,
3495 cm_id_priv);
3496 if (ret)
3497 goto out;
3498
3499 cm_id->service_id = param->service_id;
3500 cm_id->service_mask = ~cpu_to_be64(0);
3501 cm_id_priv->timeout_ms = param->timeout_ms;
3502 cm_id_priv->max_cm_retries = param->max_cm_retries;
3503 ret = cm_alloc_msg(cm_id_priv, &msg);
3504 if (ret)
3505 goto out;
3506
3507 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3508 param);
3509 msg->timeout_ms = cm_id_priv->timeout_ms;
3510 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3511
3512 spin_lock_irqsave(&cm_id_priv->lock, flags);
3513 if (cm_id->state == IB_CM_IDLE)
3514 ret = ib_post_send_mad(msg, NULL);
3515 else
3516 ret = -EINVAL;
3517
3518 if (ret) {
3519 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3520 cm_free_msg(msg);
3521 goto out;
3522 }
3523 cm_id->state = IB_CM_SIDR_REQ_SENT;
3524 cm_id_priv->msg = msg;
3525 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3526 out:
3527 return ret;
3528 }
3529 EXPORT_SYMBOL(ib_send_cm_sidr_req);
3530
3531 static void cm_format_sidr_req_event(struct cm_work *work,
3532 const struct cm_id_private *rx_cm_id,
3533 struct ib_cm_id *listen_id)
3534 {
3535 struct cm_sidr_req_msg *sidr_req_msg;
3536 struct ib_cm_sidr_req_event_param *param;
3537
3538 sidr_req_msg = (struct cm_sidr_req_msg *)
3539 work->mad_recv_wc->recv_buf.mad;
3540 param = &work->cm_event.param.sidr_req_rcvd;
3541 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3542 param->listen_id = listen_id;
3543 param->service_id = sidr_req_msg->service_id;
3544 param->bth_pkey = cm_get_bth_pkey(work);
3545 param->port = work->port->port_num;
3546 param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3547 work->cm_event.private_data = &sidr_req_msg->private_data;
3548 }
3549
3550 static int cm_sidr_req_handler(struct cm_work *work)
3551 {
3552 struct ib_cm_id *cm_id;
3553 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3554 struct cm_sidr_req_msg *sidr_req_msg;
3555 struct ib_wc *wc;
3556 int ret;
3557
3558 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3559 if (IS_ERR(cm_id))
3560 return PTR_ERR(cm_id);
3561 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3562
3563
3564 sidr_req_msg = (struct cm_sidr_req_msg *)
3565 work->mad_recv_wc->recv_buf.mad;
3566 wc = work->mad_recv_wc->wc;
3567 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3568 cm_id_priv->av.dgid.global.interface_id = 0;
3569 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3570 work->mad_recv_wc->recv_buf.grh,
3571 &cm_id_priv->av);
3572 if (ret)
3573 goto out;
3574
3575 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3576 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3577 atomic_inc(&cm_id_priv->work_count);
3578
3579 spin_lock_irq(&cm.lock);
3580 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3581 if (cur_cm_id_priv) {
3582 spin_unlock_irq(&cm.lock);
3583 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3584 counter[CM_SIDR_REQ_COUNTER]);
3585 goto out;
3586 }
3587 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3588 cur_cm_id_priv = cm_find_listen(cm_id->device,
3589 sidr_req_msg->service_id);
3590 if (!cur_cm_id_priv) {
3591 spin_unlock_irq(&cm.lock);
3592 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3593 goto out;
3594 }
3595 atomic_inc(&cur_cm_id_priv->refcount);
3596 atomic_inc(&cm_id_priv->refcount);
3597 spin_unlock_irq(&cm.lock);
3598
3599 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3600 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3601 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3602 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3603
3604 cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
3605 cm_process_work(cm_id_priv, work);
3606 cm_deref_id(cur_cm_id_priv);
3607 return 0;
3608 out:
3609 ib_destroy_cm_id(&cm_id_priv->id);
3610 return -EINVAL;
3611 }
3612
3613 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3614 struct cm_id_private *cm_id_priv,
3615 struct ib_cm_sidr_rep_param *param)
3616 {
3617 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3618 cm_id_priv->tid);
3619 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3620 sidr_rep_msg->status = param->status;
3621 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3622 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3623 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3624
3625 if (param->info && param->info_length)
3626 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3627
3628 if (param->private_data && param->private_data_len)
3629 memcpy(sidr_rep_msg->private_data, param->private_data,
3630 param->private_data_len);
3631 }
3632
3633 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3634 struct ib_cm_sidr_rep_param *param)
3635 {
3636 struct cm_id_private *cm_id_priv;
3637 struct ib_mad_send_buf *msg;
3638 unsigned long flags;
3639 int ret;
3640
3641 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3642 (param->private_data &&
3643 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3644 return -EINVAL;
3645
3646 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3647 spin_lock_irqsave(&cm_id_priv->lock, flags);
3648 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3649 ret = -EINVAL;
3650 goto error;
3651 }
3652
3653 ret = cm_alloc_msg(cm_id_priv, &msg);
3654 if (ret)
3655 goto error;
3656
3657 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3658 param);
3659 ret = ib_post_send_mad(msg, NULL);
3660 if (ret) {
3661 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3662 cm_free_msg(msg);
3663 return ret;
3664 }
3665 cm_id->state = IB_CM_IDLE;
3666 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3667
3668 spin_lock_irqsave(&cm.lock, flags);
3669 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3670 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3671 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3672 }
3673 spin_unlock_irqrestore(&cm.lock, flags);
3674 return 0;
3675
3676 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3677 return ret;
3678 }
3679 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3680
3681 static void cm_format_sidr_rep_event(struct cm_work *work,
3682 const struct cm_id_private *cm_id_priv)
3683 {
3684 struct cm_sidr_rep_msg *sidr_rep_msg;
3685 struct ib_cm_sidr_rep_event_param *param;
3686
3687 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3688 work->mad_recv_wc->recv_buf.mad;
3689 param = &work->cm_event.param.sidr_rep_rcvd;
3690 param->status = sidr_rep_msg->status;
3691 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3692 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3693 param->info = &sidr_rep_msg->info;
3694 param->info_len = sidr_rep_msg->info_length;
3695 param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3696 work->cm_event.private_data = &sidr_rep_msg->private_data;
3697 }
3698
3699 static int cm_sidr_rep_handler(struct cm_work *work)
3700 {
3701 struct cm_sidr_rep_msg *sidr_rep_msg;
3702 struct cm_id_private *cm_id_priv;
3703
3704 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3705 work->mad_recv_wc->recv_buf.mad;
3706 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3707 if (!cm_id_priv)
3708 return -EINVAL;
3709
3710 spin_lock_irq(&cm_id_priv->lock);
3711 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3712 spin_unlock_irq(&cm_id_priv->lock);
3713 goto out;
3714 }
3715 cm_id_priv->id.state = IB_CM_IDLE;
3716 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3717 spin_unlock_irq(&cm_id_priv->lock);
3718
3719 cm_format_sidr_rep_event(work, cm_id_priv);
3720 cm_process_work(cm_id_priv, work);
3721 return 0;
3722 out:
3723 cm_deref_id(cm_id_priv);
3724 return -EINVAL;
3725 }
3726
3727 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3728 enum ib_wc_status wc_status)
3729 {
3730 struct cm_id_private *cm_id_priv;
3731 struct ib_cm_event cm_event;
3732 enum ib_cm_state state;
3733 int ret;
3734
3735 memset(&cm_event, 0, sizeof cm_event);
3736 cm_id_priv = msg->context[0];
3737
3738
3739 spin_lock_irq(&cm_id_priv->lock);
3740 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3741 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3742 goto discard;
3743
3744 pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
3745 state, ib_wc_status_msg(wc_status));
3746 switch (state) {
3747 case IB_CM_REQ_SENT:
3748 case IB_CM_MRA_REQ_RCVD:
3749 cm_reset_to_idle(cm_id_priv);
3750 cm_event.event = IB_CM_REQ_ERROR;
3751 break;
3752 case IB_CM_REP_SENT:
3753 case IB_CM_MRA_REP_RCVD:
3754 cm_reset_to_idle(cm_id_priv);
3755 cm_event.event = IB_CM_REP_ERROR;
3756 break;
3757 case IB_CM_DREQ_SENT:
3758 cm_enter_timewait(cm_id_priv);
3759 cm_event.event = IB_CM_DREQ_ERROR;
3760 break;
3761 case IB_CM_SIDR_REQ_SENT:
3762 cm_id_priv->id.state = IB_CM_IDLE;
3763 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3764 break;
3765 default:
3766 goto discard;
3767 }
3768 spin_unlock_irq(&cm_id_priv->lock);
3769 cm_event.param.send_status = wc_status;
3770
3771
3772 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3773 cm_free_msg(msg);
3774 if (ret)
3775 ib_destroy_cm_id(&cm_id_priv->id);
3776 return;
3777 discard:
3778 spin_unlock_irq(&cm_id_priv->lock);
3779 cm_free_msg(msg);
3780 }
3781
3782 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3783 struct ib_mad_send_wc *mad_send_wc)
3784 {
3785 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3786 struct cm_port *port;
3787 u16 attr_index;
3788
3789 port = mad_agent->context;
3790 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3791 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3792
3793
3794
3795
3796
3797
3798 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3799 msg->retries = 1;
3800
3801 atomic_long_add(1 + msg->retries,
3802 &port->counter_group[CM_XMIT].counter[attr_index]);
3803 if (msg->retries)
3804 atomic_long_add(msg->retries,
3805 &port->counter_group[CM_XMIT_RETRIES].
3806 counter[attr_index]);
3807
3808 switch (mad_send_wc->status) {
3809 case IB_WC_SUCCESS:
3810 case IB_WC_WR_FLUSH_ERR:
3811 cm_free_msg(msg);
3812 break;
3813 default:
3814 if (msg->context[0] && msg->context[1])
3815 cm_process_send_error(msg, mad_send_wc->status);
3816 else
3817 cm_free_msg(msg);
3818 break;
3819 }
3820 }
3821
3822 static void cm_work_handler(struct work_struct *_work)
3823 {
3824 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3825 int ret;
3826
3827 switch (work->cm_event.event) {
3828 case IB_CM_REQ_RECEIVED:
3829 ret = cm_req_handler(work);
3830 break;
3831 case IB_CM_MRA_RECEIVED:
3832 ret = cm_mra_handler(work);
3833 break;
3834 case IB_CM_REJ_RECEIVED:
3835 ret = cm_rej_handler(work);
3836 break;
3837 case IB_CM_REP_RECEIVED:
3838 ret = cm_rep_handler(work);
3839 break;
3840 case IB_CM_RTU_RECEIVED:
3841 ret = cm_rtu_handler(work);
3842 break;
3843 case IB_CM_USER_ESTABLISHED:
3844 ret = cm_establish_handler(work);
3845 break;
3846 case IB_CM_DREQ_RECEIVED:
3847 ret = cm_dreq_handler(work);
3848 break;
3849 case IB_CM_DREP_RECEIVED:
3850 ret = cm_drep_handler(work);
3851 break;
3852 case IB_CM_SIDR_REQ_RECEIVED:
3853 ret = cm_sidr_req_handler(work);
3854 break;
3855 case IB_CM_SIDR_REP_RECEIVED:
3856 ret = cm_sidr_rep_handler(work);
3857 break;
3858 case IB_CM_LAP_RECEIVED:
3859 ret = cm_lap_handler(work);
3860 break;
3861 case IB_CM_APR_RECEIVED:
3862 ret = cm_apr_handler(work);
3863 break;
3864 case IB_CM_TIMEWAIT_EXIT:
3865 ret = cm_timewait_handler(work);
3866 break;
3867 default:
3868 pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
3869 ret = -EINVAL;
3870 break;
3871 }
3872 if (ret)
3873 cm_free_work(work);
3874 }
3875
3876 static int cm_establish(struct ib_cm_id *cm_id)
3877 {
3878 struct cm_id_private *cm_id_priv;
3879 struct cm_work *work;
3880 unsigned long flags;
3881 int ret = 0;
3882 struct cm_device *cm_dev;
3883
3884 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3885 if (!cm_dev)
3886 return -ENODEV;
3887
3888 work = kmalloc(sizeof *work, GFP_ATOMIC);
3889 if (!work)
3890 return -ENOMEM;
3891
3892 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3893 spin_lock_irqsave(&cm_id_priv->lock, flags);
3894 switch (cm_id->state)
3895 {
3896 case IB_CM_REP_SENT:
3897 case IB_CM_MRA_REP_RCVD:
3898 cm_id->state = IB_CM_ESTABLISHED;
3899 break;
3900 case IB_CM_ESTABLISHED:
3901 ret = -EISCONN;
3902 break;
3903 default:
3904 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
3905 be32_to_cpu(cm_id->local_id), cm_id->state);
3906 ret = -EINVAL;
3907 break;
3908 }
3909 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3910
3911 if (ret) {
3912 kfree(work);
3913 goto out;
3914 }
3915
3916
3917
3918
3919
3920
3921
3922 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3923 work->local_id = cm_id->local_id;
3924 work->remote_id = cm_id->remote_id;
3925 work->mad_recv_wc = NULL;
3926 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3927
3928
3929 spin_lock_irqsave(&cm.lock, flags);
3930 if (!cm_dev->going_down) {
3931 queue_delayed_work(cm.wq, &work->work, 0);
3932 } else {
3933 kfree(work);
3934 ret = -ENODEV;
3935 }
3936 spin_unlock_irqrestore(&cm.lock, flags);
3937
3938 out:
3939 return ret;
3940 }
3941
3942 static int cm_migrate(struct ib_cm_id *cm_id)
3943 {
3944 struct cm_id_private *cm_id_priv;
3945 struct cm_av tmp_av;
3946 unsigned long flags;
3947 int tmp_send_port_not_ready;
3948 int ret = 0;
3949
3950 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3951 spin_lock_irqsave(&cm_id_priv->lock, flags);
3952 if (cm_id->state == IB_CM_ESTABLISHED &&
3953 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3954 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3955 cm_id->lap_state = IB_CM_LAP_IDLE;
3956
3957 tmp_av = cm_id_priv->av;
3958 cm_id_priv->av = cm_id_priv->alt_av;
3959 cm_id_priv->alt_av = tmp_av;
3960
3961 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3962 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3963 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3964 } else
3965 ret = -EINVAL;
3966 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3967
3968 return ret;
3969 }
3970
3971 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3972 {
3973 int ret;
3974
3975 switch (event) {
3976 case IB_EVENT_COMM_EST:
3977 ret = cm_establish(cm_id);
3978 break;
3979 case IB_EVENT_PATH_MIG:
3980 ret = cm_migrate(cm_id);
3981 break;
3982 default:
3983 ret = -EINVAL;
3984 }
3985 return ret;
3986 }
3987 EXPORT_SYMBOL(ib_cm_notify);
3988
3989 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3990 struct ib_mad_send_buf *send_buf,
3991 struct ib_mad_recv_wc *mad_recv_wc)
3992 {
3993 struct cm_port *port = mad_agent->context;
3994 struct cm_work *work;
3995 enum ib_cm_event_type event;
3996 bool alt_path = false;
3997 u16 attr_id;
3998 int paths = 0;
3999 int going_down = 0;
4000
4001 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
4002 case CM_REQ_ATTR_ID:
4003 alt_path = cm_req_has_alt_path((struct cm_req_msg *)
4004 mad_recv_wc->recv_buf.mad);
4005 paths = 1 + (alt_path != 0);
4006 event = IB_CM_REQ_RECEIVED;
4007 break;
4008 case CM_MRA_ATTR_ID:
4009 event = IB_CM_MRA_RECEIVED;
4010 break;
4011 case CM_REJ_ATTR_ID:
4012 event = IB_CM_REJ_RECEIVED;
4013 break;
4014 case CM_REP_ATTR_ID:
4015 event = IB_CM_REP_RECEIVED;
4016 break;
4017 case CM_RTU_ATTR_ID:
4018 event = IB_CM_RTU_RECEIVED;
4019 break;
4020 case CM_DREQ_ATTR_ID:
4021 event = IB_CM_DREQ_RECEIVED;
4022 break;
4023 case CM_DREP_ATTR_ID:
4024 event = IB_CM_DREP_RECEIVED;
4025 break;
4026 case CM_SIDR_REQ_ATTR_ID:
4027 event = IB_CM_SIDR_REQ_RECEIVED;
4028 break;
4029 case CM_SIDR_REP_ATTR_ID:
4030 event = IB_CM_SIDR_REP_RECEIVED;
4031 break;
4032 case CM_LAP_ATTR_ID:
4033 paths = 1;
4034 event = IB_CM_LAP_RECEIVED;
4035 break;
4036 case CM_APR_ATTR_ID:
4037 event = IB_CM_APR_RECEIVED;
4038 break;
4039 default:
4040 ib_free_recv_mad(mad_recv_wc);
4041 return;
4042 }
4043
4044 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
4045 atomic_long_inc(&port->counter_group[CM_RECV].
4046 counter[attr_id - CM_ATTR_ID_OFFSET]);
4047
4048 work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
4049 if (!work) {
4050 ib_free_recv_mad(mad_recv_wc);
4051 return;
4052 }
4053
4054 INIT_DELAYED_WORK(&work->work, cm_work_handler);
4055 work->cm_event.event = event;
4056 work->mad_recv_wc = mad_recv_wc;
4057 work->port = port;
4058
4059
4060 spin_lock_irq(&cm.lock);
4061 if (!port->cm_dev->going_down)
4062 queue_delayed_work(cm.wq, &work->work, 0);
4063 else
4064 going_down = 1;
4065 spin_unlock_irq(&cm.lock);
4066
4067 if (going_down) {
4068 kfree(work);
4069 ib_free_recv_mad(mad_recv_wc);
4070 }
4071 }
4072
4073 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
4074 struct ib_qp_attr *qp_attr,
4075 int *qp_attr_mask)
4076 {
4077 unsigned long flags;
4078 int ret;
4079
4080 spin_lock_irqsave(&cm_id_priv->lock, flags);
4081 switch (cm_id_priv->id.state) {
4082 case IB_CM_REQ_SENT:
4083 case IB_CM_MRA_REQ_RCVD:
4084 case IB_CM_REQ_RCVD:
4085 case IB_CM_MRA_REQ_SENT:
4086 case IB_CM_REP_RCVD:
4087 case IB_CM_MRA_REP_SENT:
4088 case IB_CM_REP_SENT:
4089 case IB_CM_MRA_REP_RCVD:
4090 case IB_CM_ESTABLISHED:
4091 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
4092 IB_QP_PKEY_INDEX | IB_QP_PORT;
4093 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
4094 if (cm_id_priv->responder_resources)
4095 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
4096 IB_ACCESS_REMOTE_ATOMIC;
4097 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
4098 qp_attr->port_num = cm_id_priv->av.port->port_num;
4099 ret = 0;
4100 break;
4101 default:
4102 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
4103 __func__, be32_to_cpu(cm_id_priv->id.local_id),
4104 cm_id_priv->id.state);
4105 ret = -EINVAL;
4106 break;
4107 }
4108 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4109 return ret;
4110 }
4111
4112 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
4113 struct ib_qp_attr *qp_attr,
4114 int *qp_attr_mask)
4115 {
4116 unsigned long flags;
4117 int ret;
4118
4119 spin_lock_irqsave(&cm_id_priv->lock, flags);
4120 switch (cm_id_priv->id.state) {
4121 case IB_CM_REQ_RCVD:
4122 case IB_CM_MRA_REQ_SENT:
4123 case IB_CM_REP_RCVD:
4124 case IB_CM_MRA_REP_SENT:
4125 case IB_CM_REP_SENT:
4126 case IB_CM_MRA_REP_RCVD:
4127 case IB_CM_ESTABLISHED:
4128 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
4129 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
4130 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
4131 qp_attr->path_mtu = cm_id_priv->path_mtu;
4132 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
4133 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
4134 if (cm_id_priv->qp_type == IB_QPT_RC ||
4135 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
4136 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
4137 IB_QP_MIN_RNR_TIMER;
4138 qp_attr->max_dest_rd_atomic =
4139 cm_id_priv->responder_resources;
4140 qp_attr->min_rnr_timer = 0;
4141 }
4142 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4143 *qp_attr_mask |= IB_QP_ALT_PATH;
4144 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4145 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4146 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4147 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4148 }
4149 ret = 0;
4150 break;
4151 default:
4152 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
4153 __func__, be32_to_cpu(cm_id_priv->id.local_id),
4154 cm_id_priv->id.state);
4155 ret = -EINVAL;
4156 break;
4157 }
4158 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4159 return ret;
4160 }
4161
4162 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
4163 struct ib_qp_attr *qp_attr,
4164 int *qp_attr_mask)
4165 {
4166 unsigned long flags;
4167 int ret;
4168
4169 spin_lock_irqsave(&cm_id_priv->lock, flags);
4170 switch (cm_id_priv->id.state) {
4171
4172 case IB_CM_REQ_RCVD:
4173 case IB_CM_MRA_REQ_SENT:
4174
4175 case IB_CM_REP_RCVD:
4176 case IB_CM_MRA_REP_SENT:
4177 case IB_CM_REP_SENT:
4178 case IB_CM_MRA_REP_RCVD:
4179 case IB_CM_ESTABLISHED:
4180 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
4181 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
4182 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
4183 switch (cm_id_priv->qp_type) {
4184 case IB_QPT_RC:
4185 case IB_QPT_XRC_INI:
4186 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4187 IB_QP_MAX_QP_RD_ATOMIC;
4188 qp_attr->retry_cnt = cm_id_priv->retry_count;
4189 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
4190 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
4191
4192 case IB_QPT_XRC_TGT:
4193 *qp_attr_mask |= IB_QP_TIMEOUT;
4194 qp_attr->timeout = cm_id_priv->av.timeout;
4195 break;
4196 default:
4197 break;
4198 }
4199 if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4200 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
4201 qp_attr->path_mig_state = IB_MIG_REARM;
4202 }
4203 } else {
4204 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
4205 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4206 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4207 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4208 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4209 qp_attr->path_mig_state = IB_MIG_REARM;
4210 }
4211 ret = 0;
4212 break;
4213 default:
4214 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
4215 __func__, be32_to_cpu(cm_id_priv->id.local_id),
4216 cm_id_priv->id.state);
4217 ret = -EINVAL;
4218 break;
4219 }
4220 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4221 return ret;
4222 }
4223
4224 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
4225 struct ib_qp_attr *qp_attr,
4226 int *qp_attr_mask)
4227 {
4228 struct cm_id_private *cm_id_priv;
4229 int ret;
4230
4231 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
4232 switch (qp_attr->qp_state) {
4233 case IB_QPS_INIT:
4234 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
4235 break;
4236 case IB_QPS_RTR:
4237 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
4238 break;
4239 case IB_QPS_RTS:
4240 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
4241 break;
4242 default:
4243 ret = -EINVAL;
4244 break;
4245 }
4246 return ret;
4247 }
4248 EXPORT_SYMBOL(ib_cm_init_qp_attr);
4249
4250 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
4251 char *buf)
4252 {
4253 struct cm_counter_group *group;
4254 struct cm_counter_attribute *cm_attr;
4255
4256 group = container_of(obj, struct cm_counter_group, obj);
4257 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
4258
4259 return sprintf(buf, "%ld\n",
4260 atomic_long_read(&group->counter[cm_attr->index]));
4261 }
4262
4263 static const struct sysfs_ops cm_counter_ops = {
4264 .show = cm_show_counter
4265 };
4266
4267 static struct kobj_type cm_counter_obj_type = {
4268 .sysfs_ops = &cm_counter_ops,
4269 .default_attrs = cm_counter_default_attrs
4270 };
4271
4272 static char *cm_devnode(struct device *dev, umode_t *mode)
4273 {
4274 if (mode)
4275 *mode = 0666;
4276 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
4277 }
4278
4279 struct class cm_class = {
4280 .owner = THIS_MODULE,
4281 .name = "infiniband_cm",
4282 .devnode = cm_devnode,
4283 };
4284 EXPORT_SYMBOL(cm_class);
4285
4286 static int cm_create_port_fs(struct cm_port *port)
4287 {
4288 int i, ret;
4289
4290 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
4291 ret = ib_port_register_module_stat(port->cm_dev->ib_device,
4292 port->port_num,
4293 &port->counter_group[i].obj,
4294 &cm_counter_obj_type,
4295 counter_group_names[i]);
4296 if (ret)
4297 goto error;
4298 }
4299
4300 return 0;
4301
4302 error:
4303 while (i--)
4304 ib_port_unregister_module_stat(&port->counter_group[i].obj);
4305 return ret;
4306
4307 }
4308
4309 static void cm_remove_port_fs(struct cm_port *port)
4310 {
4311 int i;
4312
4313 for (i = 0; i < CM_COUNTER_GROUPS; i++)
4314 ib_port_unregister_module_stat(&port->counter_group[i].obj);
4315
4316 }
4317
4318 static void cm_add_one(struct ib_device *ib_device)
4319 {
4320 struct cm_device *cm_dev;
4321 struct cm_port *port;
4322 struct ib_mad_reg_req reg_req = {
4323 .mgmt_class = IB_MGMT_CLASS_CM,
4324 .mgmt_class_version = IB_CM_CLASS_VERSION,
4325 };
4326 struct ib_port_modify port_modify = {
4327 .set_port_cap_mask = IB_PORT_CM_SUP
4328 };
4329 unsigned long flags;
4330 int ret;
4331 int count = 0;
4332 u8 i;
4333
4334 cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
4335 GFP_KERNEL);
4336 if (!cm_dev)
4337 return;
4338
4339 cm_dev->ib_device = ib_device;
4340 cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
4341 cm_dev->going_down = 0;
4342
4343 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4344 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4345 if (!rdma_cap_ib_cm(ib_device, i))
4346 continue;
4347
4348 port = kzalloc(sizeof *port, GFP_KERNEL);
4349 if (!port)
4350 goto error1;
4351
4352 cm_dev->port[i-1] = port;
4353 port->cm_dev = cm_dev;
4354 port->port_num = i;
4355
4356 INIT_LIST_HEAD(&port->cm_priv_prim_list);
4357 INIT_LIST_HEAD(&port->cm_priv_altr_list);
4358
4359 ret = cm_create_port_fs(port);
4360 if (ret)
4361 goto error1;
4362
4363 port->mad_agent = ib_register_mad_agent(ib_device, i,
4364 IB_QPT_GSI,
4365 ®_req,
4366 0,
4367 cm_send_handler,
4368 cm_recv_handler,
4369 port,
4370 0);
4371 if (IS_ERR(port->mad_agent))
4372 goto error2;
4373
4374 ret = ib_modify_port(ib_device, i, 0, &port_modify);
4375 if (ret)
4376 goto error3;
4377
4378 count++;
4379 }
4380
4381 if (!count)
4382 goto free;
4383
4384 ib_set_client_data(ib_device, &cm_client, cm_dev);
4385
4386 write_lock_irqsave(&cm.device_lock, flags);
4387 list_add_tail(&cm_dev->list, &cm.device_list);
4388 write_unlock_irqrestore(&cm.device_lock, flags);
4389 return;
4390
4391 error3:
4392 ib_unregister_mad_agent(port->mad_agent);
4393 error2:
4394 cm_remove_port_fs(port);
4395 error1:
4396 port_modify.set_port_cap_mask = 0;
4397 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
4398 kfree(port);
4399 while (--i) {
4400 if (!rdma_cap_ib_cm(ib_device, i))
4401 continue;
4402
4403 port = cm_dev->port[i-1];
4404 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4405 ib_unregister_mad_agent(port->mad_agent);
4406 cm_remove_port_fs(port);
4407 kfree(port);
4408 }
4409 free:
4410 kfree(cm_dev);
4411 }
4412
4413 static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4414 {
4415 struct cm_device *cm_dev = client_data;
4416 struct cm_port *port;
4417 struct cm_id_private *cm_id_priv;
4418 struct ib_mad_agent *cur_mad_agent;
4419 struct ib_port_modify port_modify = {
4420 .clr_port_cap_mask = IB_PORT_CM_SUP
4421 };
4422 unsigned long flags;
4423 int i;
4424
4425 if (!cm_dev)
4426 return;
4427
4428 write_lock_irqsave(&cm.device_lock, flags);
4429 list_del(&cm_dev->list);
4430 write_unlock_irqrestore(&cm.device_lock, flags);
4431
4432 spin_lock_irq(&cm.lock);
4433 cm_dev->going_down = 1;
4434 spin_unlock_irq(&cm.lock);
4435
4436 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4437 if (!rdma_cap_ib_cm(ib_device, i))
4438 continue;
4439
4440 port = cm_dev->port[i-1];
4441 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4442
4443 spin_lock_irq(&cm.lock);
4444 list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
4445 cm_id_priv->altr_send_port_not_ready = 1;
4446 list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
4447 cm_id_priv->prim_send_port_not_ready = 1;
4448 spin_unlock_irq(&cm.lock);
4449
4450
4451
4452
4453
4454 flush_workqueue(cm.wq);
4455 spin_lock_irq(&cm.state_lock);
4456 cur_mad_agent = port->mad_agent;
4457 port->mad_agent = NULL;
4458 spin_unlock_irq(&cm.state_lock);
4459 ib_unregister_mad_agent(cur_mad_agent);
4460 cm_remove_port_fs(port);
4461 kfree(port);
4462 }
4463
4464 kfree(cm_dev);
4465 }
4466
4467 static int __init ib_cm_init(void)
4468 {
4469 int ret;
4470
4471 INIT_LIST_HEAD(&cm.device_list);
4472 rwlock_init(&cm.device_lock);
4473 spin_lock_init(&cm.lock);
4474 spin_lock_init(&cm.state_lock);
4475 cm.listen_service_table = RB_ROOT;
4476 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4477 cm.remote_id_table = RB_ROOT;
4478 cm.remote_qp_table = RB_ROOT;
4479 cm.remote_sidr_table = RB_ROOT;
4480 xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
4481 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4482 INIT_LIST_HEAD(&cm.timewait_list);
4483
4484 ret = class_register(&cm_class);
4485 if (ret) {
4486 ret = -ENOMEM;
4487 goto error1;
4488 }
4489
4490 cm.wq = alloc_workqueue("ib_cm", 0, 1);
4491 if (!cm.wq) {
4492 ret = -ENOMEM;
4493 goto error2;
4494 }
4495
4496 ret = ib_register_client(&cm_client);
4497 if (ret)
4498 goto error3;
4499
4500 return 0;
4501 error3:
4502 destroy_workqueue(cm.wq);
4503 error2:
4504 class_unregister(&cm_class);
4505 error1:
4506 return ret;
4507 }
4508
4509 static void __exit ib_cm_cleanup(void)
4510 {
4511 struct cm_timewait_info *timewait_info, *tmp;
4512
4513 spin_lock_irq(&cm.lock);
4514 list_for_each_entry(timewait_info, &cm.timewait_list, list)
4515 cancel_delayed_work(&timewait_info->work.work);
4516 spin_unlock_irq(&cm.lock);
4517
4518 ib_unregister_client(&cm_client);
4519 destroy_workqueue(cm.wq);
4520
4521 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4522 list_del(&timewait_info->list);
4523 kfree(timewait_info);
4524 }
4525
4526 class_unregister(&cm_class);
4527 WARN_ON(!xa_empty(&cm.local_id_table));
4528 }
4529
4530 module_init(ib_cm_init);
4531 module_exit(ib_cm_cleanup);