This source file includes the following definitions:
- ib_sa_disable_local_svc
- ib_sa_query_cancelled
- ib_nl_set_path_rec_attrs
- ib_nl_get_path_rec_attrs_len
- ib_nl_send_msg
- ib_nl_make_request
- ib_nl_cancel_request
- ib_nl_process_good_resolve_rsp
- ib_nl_request_timeout
- ib_nl_handle_set_timeout
- ib_nl_is_good_resolve_resp
- ib_nl_handle_resolve_resp
- free_sm_ah
- ib_sa_register_client
- ib_sa_unregister_client
- ib_sa_cancel_query
- get_src_path_mask
- init_ah_attr_grh_fields
- ib_init_ah_attr_from_path
- alloc_mad
- free_mad
- init_mad
- send_mad
- ib_sa_unpack_path
- ib_sa_pack_path
- ib_sa_opa_pathrecord_support
- opa_pr_query_possible
- ib_sa_path_rec_callback
- ib_sa_path_rec_release
- ib_sa_path_rec_get
- ib_sa_service_rec_callback
- ib_sa_service_rec_release
- ib_sa_service_rec_query
- ib_sa_mcmember_rec_callback
- ib_sa_mcmember_rec_release
- ib_sa_mcmember_rec_query
- ib_sa_guidinfo_rec_callback
- ib_sa_guidinfo_rec_release
- ib_sa_guid_info_rec_query
- ib_sa_sendonly_fullmem_support
- ib_classportinfo_cb
- ib_sa_classport_info_rec_callback
- ib_sa_classport_info_rec_release
- ib_sa_classport_info_rec_query
- update_ib_cpi
- send_handler
- recv_handler
- update_sm_ah
- ib_sa_event
- ib_sa_add_one
- ib_sa_remove_one
- ib_sa_init
- ib_sa_cleanup
1-34 (copyright and license comment block of the original file, omitted by this extraction)
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/err.h>
38 #include <linux/random.h>
39 #include <linux/spinlock.h>
40 #include <linux/slab.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kref.h>
43 #include <linux/xarray.h>
44 #include <linux/workqueue.h>
45 #include <uapi/linux/if_ether.h>
46 #include <rdma/ib_pack.h>
47 #include <rdma/ib_cache.h>
48 #include <rdma/rdma_netlink.h>
49 #include <net/netlink.h>
50 #include <uapi/rdma/ib_user_sa.h>
51 #include <rdma/ib_marshall.h>
52 #include <rdma/ib_addr.h>
53 #include <rdma/opa_addr.h>
54 #include "sa.h"
55 #include "core_priv.h"
56
57 #define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100
58 #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
59 #define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000
60 #define IB_SA_CPI_MAX_RETRY_CNT 3
61 #define IB_SA_CPI_RETRY_WAIT 1000
62 static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;
63
64 struct ib_sa_sm_ah {
65 struct ib_ah *ah;
66 struct kref ref;
67 u16 pkey_index;
68 u8 src_path_mask;
69 };
70
71 enum rdma_class_port_info_type {
72 RDMA_CLASS_PORT_INFO_IB,
73 RDMA_CLASS_PORT_INFO_OPA
74 };
75
76 struct rdma_class_port_info {
77 enum rdma_class_port_info_type type;
78 union {
79 struct ib_class_port_info ib;
80 struct opa_class_port_info opa;
81 };
82 };
83
84 struct ib_sa_classport_cache {
85 bool valid;
86 int retry_cnt;
87 struct rdma_class_port_info data;
88 };
89
90 struct ib_sa_port {
91 struct ib_mad_agent *agent;
92 struct ib_sa_sm_ah *sm_ah;
93 struct work_struct update_task;
94 struct ib_sa_classport_cache classport_info;
95 struct delayed_work ib_cpi_work;
96 spinlock_t classport_lock;
97 spinlock_t ah_lock;
98 u8 port_num;
99 };
100
101 struct ib_sa_device {
102 int start_port, end_port;
103 struct ib_event_handler event_handler;
104 struct ib_sa_port port[0];
105 };
106
107 struct ib_sa_query {
108 void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
109 void (*release)(struct ib_sa_query *);
110 struct ib_sa_client *client;
111 struct ib_sa_port *port;
112 struct ib_mad_send_buf *mad_buf;
113 struct ib_sa_sm_ah *sm_ah;
114 int id;
115 u32 flags;
116 struct list_head list;
117 u32 seq;
118 unsigned long timeout;
119 u8 path_use;
120 };
121
122 #define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
123 #define IB_SA_CANCEL 0x00000002
124 #define IB_SA_QUERY_OPA 0x00000004
125
126 struct ib_sa_service_query {
127 void (*callback)(int, struct ib_sa_service_rec *, void *);
128 void *context;
129 struct ib_sa_query sa_query;
130 };
131
132 struct ib_sa_path_query {
133 void (*callback)(int, struct sa_path_rec *, void *);
134 void *context;
135 struct ib_sa_query sa_query;
136 struct sa_path_rec *conv_pr;
137 };
138
139 struct ib_sa_guidinfo_query {
140 void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
141 void *context;
142 struct ib_sa_query sa_query;
143 };
144
145 struct ib_sa_classport_info_query {
146 void (*callback)(void *);
147 void *context;
148 struct ib_sa_query sa_query;
149 };
150
151 struct ib_sa_mcmember_query {
152 void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
153 void *context;
154 struct ib_sa_query sa_query;
155 };
156
157 static LIST_HEAD(ib_nl_request_list);
158 static DEFINE_SPINLOCK(ib_nl_request_lock);
159 static atomic_t ib_nl_sa_request_seq;
160 static struct workqueue_struct *ib_nl_wq;
161 static struct delayed_work ib_nl_timed_work;
162 static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
163 [LS_NLA_TYPE_PATH_RECORD] = {.type = NLA_BINARY,
164 .len = sizeof(struct ib_path_rec_data)},
165 [LS_NLA_TYPE_TIMEOUT] = {.type = NLA_U32},
166 [LS_NLA_TYPE_SERVICE_ID] = {.type = NLA_U64},
167 [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
168 .len = sizeof(struct rdma_nla_ls_gid)},
169 [LS_NLA_TYPE_SGID] = {.type = NLA_BINARY,
170 .len = sizeof(struct rdma_nla_ls_gid)},
171 [LS_NLA_TYPE_TCLASS] = {.type = NLA_U8},
172 [LS_NLA_TYPE_PKEY] = {.type = NLA_U16},
173 [LS_NLA_TYPE_QOS_CLASS] = {.type = NLA_U16},
174 };
175
176
177 static void ib_sa_add_one(struct ib_device *device);
178 static void ib_sa_remove_one(struct ib_device *device, void *client_data);
179
180 static struct ib_client sa_client = {
181 .name = "sa",
182 .add = ib_sa_add_one,
183 .remove = ib_sa_remove_one
184 };
185
186 static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
187
188 static DEFINE_SPINLOCK(tid_lock);
189 static u32 tid;
190
191 #define PATH_REC_FIELD(field) \
192 .struct_offset_bytes = offsetof(struct sa_path_rec, field), \
193 .struct_size_bytes = sizeof((struct sa_path_rec *)0)->field, \
194 .field_name = "sa_path_rec:" #field
195
196 static const struct ib_field path_rec_table[] = {
197 { PATH_REC_FIELD(service_id),
198 .offset_words = 0,
199 .offset_bits = 0,
200 .size_bits = 64 },
201 { PATH_REC_FIELD(dgid),
202 .offset_words = 2,
203 .offset_bits = 0,
204 .size_bits = 128 },
205 { PATH_REC_FIELD(sgid),
206 .offset_words = 6,
207 .offset_bits = 0,
208 .size_bits = 128 },
209 { PATH_REC_FIELD(ib.dlid),
210 .offset_words = 10,
211 .offset_bits = 0,
212 .size_bits = 16 },
213 { PATH_REC_FIELD(ib.slid),
214 .offset_words = 10,
215 .offset_bits = 16,
216 .size_bits = 16 },
217 { PATH_REC_FIELD(ib.raw_traffic),
218 .offset_words = 11,
219 .offset_bits = 0,
220 .size_bits = 1 },
221 { RESERVED,
222 .offset_words = 11,
223 .offset_bits = 1,
224 .size_bits = 3 },
225 { PATH_REC_FIELD(flow_label),
226 .offset_words = 11,
227 .offset_bits = 4,
228 .size_bits = 20 },
229 { PATH_REC_FIELD(hop_limit),
230 .offset_words = 11,
231 .offset_bits = 24,
232 .size_bits = 8 },
233 { PATH_REC_FIELD(traffic_class),
234 .offset_words = 12,
235 .offset_bits = 0,
236 .size_bits = 8 },
237 { PATH_REC_FIELD(reversible),
238 .offset_words = 12,
239 .offset_bits = 8,
240 .size_bits = 1 },
241 { PATH_REC_FIELD(numb_path),
242 .offset_words = 12,
243 .offset_bits = 9,
244 .size_bits = 7 },
245 { PATH_REC_FIELD(pkey),
246 .offset_words = 12,
247 .offset_bits = 16,
248 .size_bits = 16 },
249 { PATH_REC_FIELD(qos_class),
250 .offset_words = 13,
251 .offset_bits = 0,
252 .size_bits = 12 },
253 { PATH_REC_FIELD(sl),
254 .offset_words = 13,
255 .offset_bits = 12,
256 .size_bits = 4 },
257 { PATH_REC_FIELD(mtu_selector),
258 .offset_words = 13,
259 .offset_bits = 16,
260 .size_bits = 2 },
261 { PATH_REC_FIELD(mtu),
262 .offset_words = 13,
263 .offset_bits = 18,
264 .size_bits = 6 },
265 { PATH_REC_FIELD(rate_selector),
266 .offset_words = 13,
267 .offset_bits = 24,
268 .size_bits = 2 },
269 { PATH_REC_FIELD(rate),
270 .offset_words = 13,
271 .offset_bits = 26,
272 .size_bits = 6 },
273 { PATH_REC_FIELD(packet_life_time_selector),
274 .offset_words = 14,
275 .offset_bits = 0,
276 .size_bits = 2 },
277 { PATH_REC_FIELD(packet_life_time),
278 .offset_words = 14,
279 .offset_bits = 2,
280 .size_bits = 6 },
281 { PATH_REC_FIELD(preference),
282 .offset_words = 14,
283 .offset_bits = 8,
284 .size_bits = 8 },
285 { RESERVED,
286 .offset_words = 14,
287 .offset_bits = 16,
288 .size_bits = 48 },
289 };
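/*
 * Editor's illustrative sketch, not part of sa_query.c: ib_pack()/ib_unpack()
 * (rdma/ib_pack.h) walk tables such as path_rec_table above to convert
 * between struct sa_path_rec and the big-endian on-wire SA attribute.  For
 * example, the pkey entry (offset_words = 12, offset_bits = 16,
 * size_bits = 16) lands at byte 12 * 4 + 16 / 8 = 50, i.e. bytes 50..51 of
 * mad->data.  example_field_byte_offset is a hypothetical helper.
 */
static inline unsigned int example_field_byte_offset(const struct ib_field *f)
{
	return f->offset_words * 4 + f->offset_bits / 8;
}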
290
291 #define OPA_PATH_REC_FIELD(field) \
292 .struct_offset_bytes = \
293 offsetof(struct sa_path_rec, field), \
294 .struct_size_bytes = \
295 sizeof((struct sa_path_rec *)0)->field, \
296 .field_name = "sa_path_rec:" #field
297
298 static const struct ib_field opa_path_rec_table[] = {
299 { OPA_PATH_REC_FIELD(service_id),
300 .offset_words = 0,
301 .offset_bits = 0,
302 .size_bits = 64 },
303 { OPA_PATH_REC_FIELD(dgid),
304 .offset_words = 2,
305 .offset_bits = 0,
306 .size_bits = 128 },
307 { OPA_PATH_REC_FIELD(sgid),
308 .offset_words = 6,
309 .offset_bits = 0,
310 .size_bits = 128 },
311 { OPA_PATH_REC_FIELD(opa.dlid),
312 .offset_words = 10,
313 .offset_bits = 0,
314 .size_bits = 32 },
315 { OPA_PATH_REC_FIELD(opa.slid),
316 .offset_words = 11,
317 .offset_bits = 0,
318 .size_bits = 32 },
319 { OPA_PATH_REC_FIELD(opa.raw_traffic),
320 .offset_words = 12,
321 .offset_bits = 0,
322 .size_bits = 1 },
323 { RESERVED,
324 .offset_words = 12,
325 .offset_bits = 1,
326 .size_bits = 3 },
327 { OPA_PATH_REC_FIELD(flow_label),
328 .offset_words = 12,
329 .offset_bits = 4,
330 .size_bits = 20 },
331 { OPA_PATH_REC_FIELD(hop_limit),
332 .offset_words = 12,
333 .offset_bits = 24,
334 .size_bits = 8 },
335 { OPA_PATH_REC_FIELD(traffic_class),
336 .offset_words = 13,
337 .offset_bits = 0,
338 .size_bits = 8 },
339 { OPA_PATH_REC_FIELD(reversible),
340 .offset_words = 13,
341 .offset_bits = 8,
342 .size_bits = 1 },
343 { OPA_PATH_REC_FIELD(numb_path),
344 .offset_words = 13,
345 .offset_bits = 9,
346 .size_bits = 7 },
347 { OPA_PATH_REC_FIELD(pkey),
348 .offset_words = 13,
349 .offset_bits = 16,
350 .size_bits = 16 },
351 { OPA_PATH_REC_FIELD(opa.l2_8B),
352 .offset_words = 14,
353 .offset_bits = 0,
354 .size_bits = 1 },
355 { OPA_PATH_REC_FIELD(opa.l2_10B),
356 .offset_words = 14,
357 .offset_bits = 1,
358 .size_bits = 1 },
359 { OPA_PATH_REC_FIELD(opa.l2_9B),
360 .offset_words = 14,
361 .offset_bits = 2,
362 .size_bits = 1 },
363 { OPA_PATH_REC_FIELD(opa.l2_16B),
364 .offset_words = 14,
365 .offset_bits = 3,
366 .size_bits = 1 },
367 { RESERVED,
368 .offset_words = 14,
369 .offset_bits = 4,
370 .size_bits = 2 },
371 { OPA_PATH_REC_FIELD(opa.qos_type),
372 .offset_words = 14,
373 .offset_bits = 6,
374 .size_bits = 2 },
375 { OPA_PATH_REC_FIELD(opa.qos_priority),
376 .offset_words = 14,
377 .offset_bits = 8,
378 .size_bits = 8 },
379 { RESERVED,
380 .offset_words = 14,
381 .offset_bits = 16,
382 .size_bits = 3 },
383 { OPA_PATH_REC_FIELD(sl),
384 .offset_words = 14,
385 .offset_bits = 19,
386 .size_bits = 5 },
387 { RESERVED,
388 .offset_words = 14,
389 .offset_bits = 24,
390 .size_bits = 8 },
391 { OPA_PATH_REC_FIELD(mtu_selector),
392 .offset_words = 15,
393 .offset_bits = 0,
394 .size_bits = 2 },
395 { OPA_PATH_REC_FIELD(mtu),
396 .offset_words = 15,
397 .offset_bits = 2,
398 .size_bits = 6 },
399 { OPA_PATH_REC_FIELD(rate_selector),
400 .offset_words = 15,
401 .offset_bits = 8,
402 .size_bits = 2 },
403 { OPA_PATH_REC_FIELD(rate),
404 .offset_words = 15,
405 .offset_bits = 10,
406 .size_bits = 6 },
407 { OPA_PATH_REC_FIELD(packet_life_time_selector),
408 .offset_words = 15,
409 .offset_bits = 16,
410 .size_bits = 2 },
411 { OPA_PATH_REC_FIELD(packet_life_time),
412 .offset_words = 15,
413 .offset_bits = 18,
414 .size_bits = 6 },
415 { OPA_PATH_REC_FIELD(preference),
416 .offset_words = 15,
417 .offset_bits = 24,
418 .size_bits = 8 },
419 };
420
421 #define MCMEMBER_REC_FIELD(field) \
422 .struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field), \
423 .struct_size_bytes = sizeof ((struct ib_sa_mcmember_rec *) 0)->field, \
424 .field_name = "sa_mcmember_rec:" #field
425
426 static const struct ib_field mcmember_rec_table[] = {
427 { MCMEMBER_REC_FIELD(mgid),
428 .offset_words = 0,
429 .offset_bits = 0,
430 .size_bits = 128 },
431 { MCMEMBER_REC_FIELD(port_gid),
432 .offset_words = 4,
433 .offset_bits = 0,
434 .size_bits = 128 },
435 { MCMEMBER_REC_FIELD(qkey),
436 .offset_words = 8,
437 .offset_bits = 0,
438 .size_bits = 32 },
439 { MCMEMBER_REC_FIELD(mlid),
440 .offset_words = 9,
441 .offset_bits = 0,
442 .size_bits = 16 },
443 { MCMEMBER_REC_FIELD(mtu_selector),
444 .offset_words = 9,
445 .offset_bits = 16,
446 .size_bits = 2 },
447 { MCMEMBER_REC_FIELD(mtu),
448 .offset_words = 9,
449 .offset_bits = 18,
450 .size_bits = 6 },
451 { MCMEMBER_REC_FIELD(traffic_class),
452 .offset_words = 9,
453 .offset_bits = 24,
454 .size_bits = 8 },
455 { MCMEMBER_REC_FIELD(pkey),
456 .offset_words = 10,
457 .offset_bits = 0,
458 .size_bits = 16 },
459 { MCMEMBER_REC_FIELD(rate_selector),
460 .offset_words = 10,
461 .offset_bits = 16,
462 .size_bits = 2 },
463 { MCMEMBER_REC_FIELD(rate),
464 .offset_words = 10,
465 .offset_bits = 18,
466 .size_bits = 6 },
467 { MCMEMBER_REC_FIELD(packet_life_time_selector),
468 .offset_words = 10,
469 .offset_bits = 24,
470 .size_bits = 2 },
471 { MCMEMBER_REC_FIELD(packet_life_time),
472 .offset_words = 10,
473 .offset_bits = 26,
474 .size_bits = 6 },
475 { MCMEMBER_REC_FIELD(sl),
476 .offset_words = 11,
477 .offset_bits = 0,
478 .size_bits = 4 },
479 { MCMEMBER_REC_FIELD(flow_label),
480 .offset_words = 11,
481 .offset_bits = 4,
482 .size_bits = 20 },
483 { MCMEMBER_REC_FIELD(hop_limit),
484 .offset_words = 11,
485 .offset_bits = 24,
486 .size_bits = 8 },
487 { MCMEMBER_REC_FIELD(scope),
488 .offset_words = 12,
489 .offset_bits = 0,
490 .size_bits = 4 },
491 { MCMEMBER_REC_FIELD(join_state),
492 .offset_words = 12,
493 .offset_bits = 4,
494 .size_bits = 4 },
495 { MCMEMBER_REC_FIELD(proxy_join),
496 .offset_words = 12,
497 .offset_bits = 8,
498 .size_bits = 1 },
499 { RESERVED,
500 .offset_words = 12,
501 .offset_bits = 9,
502 .size_bits = 23 },
503 };
504
505 #define SERVICE_REC_FIELD(field) \
506 .struct_offset_bytes = offsetof(struct ib_sa_service_rec, field), \
507 .struct_size_bytes = sizeof ((struct ib_sa_service_rec *) 0)->field, \
508 .field_name = "sa_service_rec:" #field
509
510 static const struct ib_field service_rec_table[] = {
511 { SERVICE_REC_FIELD(id),
512 .offset_words = 0,
513 .offset_bits = 0,
514 .size_bits = 64 },
515 { SERVICE_REC_FIELD(gid),
516 .offset_words = 2,
517 .offset_bits = 0,
518 .size_bits = 128 },
519 { SERVICE_REC_FIELD(pkey),
520 .offset_words = 6,
521 .offset_bits = 0,
522 .size_bits = 16 },
523 { SERVICE_REC_FIELD(lease),
524 .offset_words = 7,
525 .offset_bits = 0,
526 .size_bits = 32 },
527 { SERVICE_REC_FIELD(key),
528 .offset_words = 8,
529 .offset_bits = 0,
530 .size_bits = 128 },
531 { SERVICE_REC_FIELD(name),
532 .offset_words = 12,
533 .offset_bits = 0,
534 .size_bits = 64*8 },
535 { SERVICE_REC_FIELD(data8),
536 .offset_words = 28,
537 .offset_bits = 0,
538 .size_bits = 16*8 },
539 { SERVICE_REC_FIELD(data16),
540 .offset_words = 32,
541 .offset_bits = 0,
542 .size_bits = 8*16 },
543 { SERVICE_REC_FIELD(data32),
544 .offset_words = 36,
545 .offset_bits = 0,
546 .size_bits = 4*32 },
547 { SERVICE_REC_FIELD(data64),
548 .offset_words = 40,
549 .offset_bits = 0,
550 .size_bits = 2*64 },
551 };
552
553 #define CLASSPORTINFO_REC_FIELD(field) \
554 .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
555 .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
556 .field_name = "ib_class_port_info:" #field
557
558 static const struct ib_field ib_classport_info_rec_table[] = {
559 { CLASSPORTINFO_REC_FIELD(base_version),
560 .offset_words = 0,
561 .offset_bits = 0,
562 .size_bits = 8 },
563 { CLASSPORTINFO_REC_FIELD(class_version),
564 .offset_words = 0,
565 .offset_bits = 8,
566 .size_bits = 8 },
567 { CLASSPORTINFO_REC_FIELD(capability_mask),
568 .offset_words = 0,
569 .offset_bits = 16,
570 .size_bits = 16 },
571 { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
572 .offset_words = 1,
573 .offset_bits = 0,
574 .size_bits = 32 },
575 { CLASSPORTINFO_REC_FIELD(redirect_gid),
576 .offset_words = 2,
577 .offset_bits = 0,
578 .size_bits = 128 },
579 { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
580 .offset_words = 6,
581 .offset_bits = 0,
582 .size_bits = 32 },
583 { CLASSPORTINFO_REC_FIELD(redirect_lid),
584 .offset_words = 7,
585 .offset_bits = 0,
586 .size_bits = 16 },
587 { CLASSPORTINFO_REC_FIELD(redirect_pkey),
588 .offset_words = 7,
589 .offset_bits = 16,
590 .size_bits = 16 },
591
592 { CLASSPORTINFO_REC_FIELD(redirect_qp),
593 .offset_words = 8,
594 .offset_bits = 0,
595 .size_bits = 32 },
596 { CLASSPORTINFO_REC_FIELD(redirect_qkey),
597 .offset_words = 9,
598 .offset_bits = 0,
599 .size_bits = 32 },
600
601 { CLASSPORTINFO_REC_FIELD(trap_gid),
602 .offset_words = 10,
603 .offset_bits = 0,
604 .size_bits = 128 },
605 { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
606 .offset_words = 14,
607 .offset_bits = 0,
608 .size_bits = 32 },
609
610 { CLASSPORTINFO_REC_FIELD(trap_lid),
611 .offset_words = 15,
612 .offset_bits = 0,
613 .size_bits = 16 },
614 { CLASSPORTINFO_REC_FIELD(trap_pkey),
615 .offset_words = 15,
616 .offset_bits = 16,
617 .size_bits = 16 },
618
619 { CLASSPORTINFO_REC_FIELD(trap_hlqp),
620 .offset_words = 16,
621 .offset_bits = 0,
622 .size_bits = 32 },
623 { CLASSPORTINFO_REC_FIELD(trap_qkey),
624 .offset_words = 17,
625 .offset_bits = 0,
626 .size_bits = 32 },
627 };
628
629 #define OPA_CLASSPORTINFO_REC_FIELD(field) \
630 .struct_offset_bytes =\
631 offsetof(struct opa_class_port_info, field), \
632 .struct_size_bytes = \
633 sizeof((struct opa_class_port_info *)0)->field, \
634 .field_name = "opa_class_port_info:" #field
635
636 static const struct ib_field opa_classport_info_rec_table[] = {
637 { OPA_CLASSPORTINFO_REC_FIELD(base_version),
638 .offset_words = 0,
639 .offset_bits = 0,
640 .size_bits = 8 },
641 { OPA_CLASSPORTINFO_REC_FIELD(class_version),
642 .offset_words = 0,
643 .offset_bits = 8,
644 .size_bits = 8 },
645 { OPA_CLASSPORTINFO_REC_FIELD(cap_mask),
646 .offset_words = 0,
647 .offset_bits = 16,
648 .size_bits = 16 },
649 { OPA_CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
650 .offset_words = 1,
651 .offset_bits = 0,
652 .size_bits = 32 },
653 { OPA_CLASSPORTINFO_REC_FIELD(redirect_gid),
654 .offset_words = 2,
655 .offset_bits = 0,
656 .size_bits = 128 },
657 { OPA_CLASSPORTINFO_REC_FIELD(redirect_tc_fl),
658 .offset_words = 6,
659 .offset_bits = 0,
660 .size_bits = 32 },
661 { OPA_CLASSPORTINFO_REC_FIELD(redirect_lid),
662 .offset_words = 7,
663 .offset_bits = 0,
664 .size_bits = 32 },
665 { OPA_CLASSPORTINFO_REC_FIELD(redirect_sl_qp),
666 .offset_words = 8,
667 .offset_bits = 0,
668 .size_bits = 32 },
669 { OPA_CLASSPORTINFO_REC_FIELD(redirect_qkey),
670 .offset_words = 9,
671 .offset_bits = 0,
672 .size_bits = 32 },
673 { OPA_CLASSPORTINFO_REC_FIELD(trap_gid),
674 .offset_words = 10,
675 .offset_bits = 0,
676 .size_bits = 128 },
677 { OPA_CLASSPORTINFO_REC_FIELD(trap_tc_fl),
678 .offset_words = 14,
679 .offset_bits = 0,
680 .size_bits = 32 },
681 { OPA_CLASSPORTINFO_REC_FIELD(trap_lid),
682 .offset_words = 15,
683 .offset_bits = 0,
684 .size_bits = 32 },
685 { OPA_CLASSPORTINFO_REC_FIELD(trap_hl_qp),
686 .offset_words = 16,
687 .offset_bits = 0,
688 .size_bits = 32 },
689 { OPA_CLASSPORTINFO_REC_FIELD(trap_qkey),
690 .offset_words = 17,
691 .offset_bits = 0,
692 .size_bits = 32 },
693 { OPA_CLASSPORTINFO_REC_FIELD(trap_pkey),
694 .offset_words = 18,
695 .offset_bits = 0,
696 .size_bits = 16 },
697 { OPA_CLASSPORTINFO_REC_FIELD(redirect_pkey),
698 .offset_words = 18,
699 .offset_bits = 16,
700 .size_bits = 16 },
701 { OPA_CLASSPORTINFO_REC_FIELD(trap_sl_rsvd),
702 .offset_words = 19,
703 .offset_bits = 0,
704 .size_bits = 8 },
705 { RESERVED,
706 .offset_words = 19,
707 .offset_bits = 8,
708 .size_bits = 24 },
709 };
710
711 #define GUIDINFO_REC_FIELD(field) \
712 .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
713 .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
714 .field_name = "sa_guidinfo_rec:" #field
715
716 static const struct ib_field guidinfo_rec_table[] = {
717 { GUIDINFO_REC_FIELD(lid),
718 .offset_words = 0,
719 .offset_bits = 0,
720 .size_bits = 16 },
721 { GUIDINFO_REC_FIELD(block_num),
722 .offset_words = 0,
723 .offset_bits = 16,
724 .size_bits = 8 },
725 { GUIDINFO_REC_FIELD(res1),
726 .offset_words = 0,
727 .offset_bits = 24,
728 .size_bits = 8 },
729 { GUIDINFO_REC_FIELD(res2),
730 .offset_words = 1,
731 .offset_bits = 0,
732 .size_bits = 32 },
733 { GUIDINFO_REC_FIELD(guid_info_list),
734 .offset_words = 2,
735 .offset_bits = 0,
736 .size_bits = 512 },
737 };
738
739 static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
740 {
741 query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
742 }
743
744 static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
745 {
746 return (query->flags & IB_SA_CANCEL);
747 }
748
749 static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
750 struct ib_sa_query *query)
751 {
752 struct sa_path_rec *sa_rec = query->mad_buf->context[1];
753 struct ib_sa_mad *mad = query->mad_buf->mad;
754 ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
755 u16 val16;
756 u64 val64;
757 struct rdma_ls_resolve_header *header;
758
759 query->mad_buf->context[1] = NULL;
760
761
762 header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
763 memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
764 LS_DEVICE_NAME_MAX);
765 header->port_num = query->port->port_num;
766
767 if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
768 sa_rec->reversible != 0)
769 query->path_use = LS_RESOLVE_PATH_USE_GMP;
770 else
771 query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
772 header->path_use = query->path_use;
773
774
775 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
776 val64 = be64_to_cpu(sa_rec->service_id);
777 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
778 sizeof(val64), &val64);
779 }
780 if (comp_mask & IB_SA_PATH_REC_DGID)
781 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
782 sizeof(sa_rec->dgid), &sa_rec->dgid);
783 if (comp_mask & IB_SA_PATH_REC_SGID)
784 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
785 sizeof(sa_rec->sgid), &sa_rec->sgid);
786 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
787 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
788 sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);
789
790 if (comp_mask & IB_SA_PATH_REC_PKEY) {
791 val16 = be16_to_cpu(sa_rec->pkey);
792 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
793 sizeof(val16), &val16);
794 }
795 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
796 val16 = be16_to_cpu(sa_rec->qos_class);
797 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
798 sizeof(val16), &val16);
799 }
800 }
801
802 static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
803 {
804 int len = 0;
805
806 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
807 len += nla_total_size(sizeof(u64));
808 if (comp_mask & IB_SA_PATH_REC_DGID)
809 len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
810 if (comp_mask & IB_SA_PATH_REC_SGID)
811 len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
812 if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
813 len += nla_total_size(sizeof(u8));
814 if (comp_mask & IB_SA_PATH_REC_PKEY)
815 len += nla_total_size(sizeof(u16));
816 if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
817 len += nla_total_size(sizeof(u16));
818
819
820
821
822
823 if (WARN_ON(len == 0))
824 return len;
825
826
827 len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));
828
829 return len;
830 }
831
832 static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
833 {
834 struct sk_buff *skb = NULL;
835 struct nlmsghdr *nlh;
836 void *data;
837 struct ib_sa_mad *mad;
838 int len;
839
840 mad = query->mad_buf->mad;
841 len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
842 if (len <= 0)
843 return -EMSGSIZE;
844
845 skb = nlmsg_new(len, gfp_mask);
846 if (!skb)
847 return -ENOMEM;
848
849
850 data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
851 RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
852 if (!data) {
853 nlmsg_free(skb);
854 return -EMSGSIZE;
855 }
856
857
858 ib_nl_set_path_rec_attrs(skb, query);
859
860
861 nlmsg_end(skb, nlh);
862
863 return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
864 }
865
866 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
867 {
868 unsigned long flags;
869 unsigned long delay;
870 int ret;
871
872 INIT_LIST_HEAD(&query->list);
873 query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
874
875
876 spin_lock_irqsave(&ib_nl_request_lock, flags);
877 delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
878 query->timeout = delay + jiffies;
879 list_add_tail(&query->list, &ib_nl_request_list);
880 /* Start the timeout if this is the only request */
881 if (ib_nl_request_list.next == &query->list)
882 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
883 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
884
885 ret = ib_nl_send_msg(query, gfp_mask);
886 if (ret) {
887 ret = -EIO;
888
889 spin_lock_irqsave(&ib_nl_request_lock, flags);
890 list_del(&query->list);
891 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
892 }
893
894 return ret;
895 }
896
897 static int ib_nl_cancel_request(struct ib_sa_query *query)
898 {
899 unsigned long flags;
900 struct ib_sa_query *wait_query;
901 int found = 0;
902
903 spin_lock_irqsave(&ib_nl_request_lock, flags);
904 list_for_each_entry(wait_query, &ib_nl_request_list, list) {
905
906 if (query == wait_query) {
907 query->flags |= IB_SA_CANCEL;
908 query->timeout = jiffies;
909 list_move(&query->list, &ib_nl_request_list);
910 found = 1;
911 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
912 break;
913 }
914 }
915 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
916
917 return found;
918 }
919
920 static void send_handler(struct ib_mad_agent *agent,
921 struct ib_mad_send_wc *mad_send_wc);
922
923 static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
924 const struct nlmsghdr *nlh)
925 {
926 struct ib_mad_send_wc mad_send_wc;
927 struct ib_sa_mad *mad = NULL;
928 const struct nlattr *head, *curr;
929 struct ib_path_rec_data *rec;
930 int len, rem;
931 u32 mask = 0;
932 int status = -EIO;
933
934 if (query->callback) {
935 head = (const struct nlattr *) nlmsg_data(nlh);
936 len = nlmsg_len(nlh);
937 switch (query->path_use) {
938 case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
939 mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
940 break;
941
942 case LS_RESOLVE_PATH_USE_ALL:
943 case LS_RESOLVE_PATH_USE_GMP:
944 default:
945 mask = IB_PATH_PRIMARY | IB_PATH_GMP |
946 IB_PATH_BIDIRECTIONAL;
947 break;
948 }
949 nla_for_each_attr(curr, head, len, rem) {
950 if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
951 rec = nla_data(curr);
952
953
954
955
956 if ((rec->flags & mask) == mask) {
957 mad = query->mad_buf->mad;
958 mad->mad_hdr.method |=
959 IB_MGMT_METHOD_RESP;
960 memcpy(mad->data, rec->path_rec,
961 sizeof(rec->path_rec));
962 status = 0;
963 break;
964 }
965 }
966 }
967 query->callback(query, status, mad);
968 }
969
970 mad_send_wc.send_buf = query->mad_buf;
971 mad_send_wc.status = IB_WC_SUCCESS;
972 send_handler(query->mad_buf->mad_agent, &mad_send_wc);
973 }
974
975 static void ib_nl_request_timeout(struct work_struct *work)
976 {
977 unsigned long flags;
978 struct ib_sa_query *query;
979 unsigned long delay;
980 struct ib_mad_send_wc mad_send_wc;
981 int ret;
982
983 spin_lock_irqsave(&ib_nl_request_lock, flags);
984 while (!list_empty(&ib_nl_request_list)) {
985 query = list_entry(ib_nl_request_list.next,
986 struct ib_sa_query, list);
987
988 if (time_after(query->timeout, jiffies)) {
989 delay = query->timeout - jiffies;
990 if ((long)delay <= 0)
991 delay = 1;
992 queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
993 break;
994 }
995
996 list_del(&query->list);
997 ib_sa_disable_local_svc(query);
998
999 if (ib_sa_query_cancelled(query))
1000 ret = -1;
1001 else
1002 ret = ib_post_send_mad(query->mad_buf, NULL);
1003 if (ret) {
1004 mad_send_wc.send_buf = query->mad_buf;
1005 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
1006 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1007 send_handler(query->port->agent, &mad_send_wc);
1008 spin_lock_irqsave(&ib_nl_request_lock, flags);
1009 }
1010 }
1011 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1012 }
1013
1014 int ib_nl_handle_set_timeout(struct sk_buff *skb,
1015 struct nlmsghdr *nlh,
1016 struct netlink_ext_ack *extack)
1017 {
1018 int timeout, delta, abs_delta;
1019 const struct nlattr *attr;
1020 unsigned long flags;
1021 struct ib_sa_query *query;
1022 long delay = 0;
1023 struct nlattr *tb[LS_NLA_TYPE_MAX];
1024 int ret;
1025
1026 if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
1027 !(NETLINK_CB(skb).sk))
1028 return -EPERM;
1029
1030 ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
1031 nlmsg_len(nlh), ib_nl_policy, NULL);
1032 attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
1033 if (ret || !attr)
1034 goto settimeout_out;
1035
1036 timeout = *(int *) nla_data(attr);
1037 if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
1038 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
1039 if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
1040 timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
1041
1042 delta = timeout - sa_local_svc_timeout_ms;
1043 if (delta < 0)
1044 abs_delta = -delta;
1045 else
1046 abs_delta = delta;
1047
1048 if (delta != 0) {
1049 spin_lock_irqsave(&ib_nl_request_lock, flags);
1050 sa_local_svc_timeout_ms = timeout;
1051 list_for_each_entry(query, &ib_nl_request_list, list) {
1052 if (delta < 0 && abs_delta > query->timeout)
1053 query->timeout = 0;
1054 else
1055 query->timeout += delta;
1056
1057
1058 if (!delay) {
1059 delay = query->timeout - jiffies;
1060 if (delay <= 0)
1061 delay = 1;
1062 }
1063 }
1064 if (delay)
1065 mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
1066 (unsigned long)delay);
1067 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1068 }
1069
1070 settimeout_out:
1071 return 0;
1072 }
1073
1074 static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
1075 {
1076 struct nlattr *tb[LS_NLA_TYPE_MAX];
1077 int ret;
1078
1079 if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
1080 return 0;
1081
1082 ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
1083 nlmsg_len(nlh), ib_nl_policy, NULL);
1084 if (ret)
1085 return 0;
1086
1087 return 1;
1088 }
1089
1090 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
1091 struct nlmsghdr *nlh,
1092 struct netlink_ext_ack *extack)
1093 {
1094 unsigned long flags;
1095 struct ib_sa_query *query;
1096 struct ib_mad_send_buf *send_buf;
1097 struct ib_mad_send_wc mad_send_wc;
1098 int found = 0;
1099 int ret;
1100
1101 if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
1102 !(NETLINK_CB(skb).sk))
1103 return -EPERM;
1104
1105 spin_lock_irqsave(&ib_nl_request_lock, flags);
1106 list_for_each_entry(query, &ib_nl_request_list, list) {
1107
1108
1109
1110
1111 if (nlh->nlmsg_seq == query->seq) {
1112 found = !ib_sa_query_cancelled(query);
1113 if (found)
1114 list_del(&query->list);
1115 break;
1116 }
1117 }
1118
1119 if (!found) {
1120 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1121 goto resp_out;
1122 }
1123
1124 send_buf = query->mad_buf;
1125
1126 if (!ib_nl_is_good_resolve_resp(nlh)) {
1127
1128 ib_sa_disable_local_svc(query);
1129 ret = ib_post_send_mad(query->mad_buf, NULL);
1130 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1131 if (ret) {
1132 mad_send_wc.send_buf = send_buf;
1133 mad_send_wc.status = IB_WC_GENERAL_ERR;
1134 send_handler(query->port->agent, &mad_send_wc);
1135 }
1136 } else {
1137 spin_unlock_irqrestore(&ib_nl_request_lock, flags);
1138 ib_nl_process_good_resolve_rsp(query, nlh);
1139 }
1140
1141 resp_out:
1142 return 0;
1143 }
1144
1145 static void free_sm_ah(struct kref *kref)
1146 {
1147 struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
1148
1149 rdma_destroy_ah(sm_ah->ah, 0);
1150 kfree(sm_ah);
1151 }
1152
1153 void ib_sa_register_client(struct ib_sa_client *client)
1154 {
1155 atomic_set(&client->users, 1);
1156 init_completion(&client->comp);
1157 }
1158 EXPORT_SYMBOL(ib_sa_register_client);
1159
1160 void ib_sa_unregister_client(struct ib_sa_client *client)
1161 {
1162 ib_sa_client_put(client);
1163 wait_for_completion(&client->comp);
1164 }
1165 EXPORT_SYMBOL(ib_sa_unregister_client);
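/*
 * Editor's usage sketch (assumption, not part of sa_query.c): an SA consumer
 * registers a client once, issues queries against it, and unregisters it on
 * teardown; ib_sa_unregister_client() blocks until every outstanding query
 * has dropped its reference.  The name example_sa_client is hypothetical and
 * is reused by the sketches further down.
 */
static struct ib_sa_client example_sa_client;

static void example_sa_setup(void)
{
	ib_sa_register_client(&example_sa_client);
}

static void example_sa_teardown(void)
{
	/* Cancel or wait out any outstanding queries first; this call then
	 * waits for the last query to put its client reference. */
	ib_sa_unregister_client(&example_sa_client);
}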
1166
1167 /**
1168 * ib_sa_cancel_query - try to cancel an SA query
1169 * @id: ID of query to cancel
1170 * @query: query pointer to cancel
1171 *
1172 * Try to cancel an SA query.  If the id and query don't match up or
1173 * the query has already completed, nothing is done.  Otherwise the
1174 * query is canceled and will complete with a status of -EINTR.
1175 */
1176 void ib_sa_cancel_query(int id, struct ib_sa_query *query)
1177 {
1178 unsigned long flags;
1179 struct ib_mad_agent *agent;
1180 struct ib_mad_send_buf *mad_buf;
1181
1182 xa_lock_irqsave(&queries, flags);
1183 if (xa_load(&queries, id) != query) {
1184 xa_unlock_irqrestore(&queries, flags);
1185 return;
1186 }
1187 agent = query->port->agent;
1188 mad_buf = query->mad_buf;
1189 xa_unlock_irqrestore(&queries, flags);
1190
1191
1192
1193
1194
1195
1196 if (!ib_nl_cancel_request(query))
1197 ib_cancel_mad(agent, mad_buf);
1198 }
1199 EXPORT_SYMBOL(ib_sa_cancel_query);
1200
1201 static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
1202 {
1203 struct ib_sa_device *sa_dev;
1204 struct ib_sa_port *port;
1205 unsigned long flags;
1206 u8 src_path_mask;
1207
1208 sa_dev = ib_get_client_data(device, &sa_client);
1209 if (!sa_dev)
1210 return 0x7f;
1211
1212 port = &sa_dev->port[port_num - sa_dev->start_port];
1213 spin_lock_irqsave(&port->ah_lock, flags);
1214 src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
1215 spin_unlock_irqrestore(&port->ah_lock, flags);
1216
1217 return src_path_mask;
1218 }
1219
1220 static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
1221 struct sa_path_rec *rec,
1222 struct rdma_ah_attr *ah_attr,
1223 const struct ib_gid_attr *gid_attr)
1224 {
1225 enum ib_gid_type type = sa_conv_pathrec_to_gid_type(rec);
1226
1227 if (!gid_attr) {
1228 gid_attr = rdma_find_gid_by_port(device, &rec->sgid, type,
1229 port_num, NULL);
1230 if (IS_ERR(gid_attr))
1231 return PTR_ERR(gid_attr);
1232 } else
1233 rdma_hold_gid_attr(gid_attr);
1234
1235 rdma_move_grh_sgid_attr(ah_attr, &rec->dgid,
1236 be32_to_cpu(rec->flow_label),
1237 rec->hop_limit, rec->traffic_class,
1238 gid_attr);
1239 return 0;
1240 }
1241
1242 /**
1243 * ib_init_ah_attr_from_path - Initialize address handle attributes based on
1244 * an SA path record.
1245 * @device: Device associated with the ah attributes initialization.
1246 * @port_num: Port on the specified device.
1247 * @rec: path record entry to use for ah attributes initialization.
1248 * @ah_attr: address handle attributes to initialize from the path record.
1249 * @gid_attr: SGID attribute to consider during initialization.
1250 *
1251 * When ib_init_ah_attr_from_path() returns success,
1252 * (a) for the IB link layer it optionally holds a reference to an SGID
1253 * attribute when a GRH is present;
1254 * (b) for the RoCE link layer it holds a reference to an SGID attribute.
1255 * The caller must invoke rdma_destroy_ah_attr() to release the SGID
1256 * reference taken by ib_init_ah_attr_from_path().
1257 */
1258 int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
1259 struct sa_path_rec *rec,
1260 struct rdma_ah_attr *ah_attr,
1261 const struct ib_gid_attr *gid_attr)
1262 {
1263 int ret = 0;
1264
1265 memset(ah_attr, 0, sizeof(*ah_attr));
1266 ah_attr->type = rdma_ah_find_type(device, port_num);
1267 rdma_ah_set_sl(ah_attr, rec->sl);
1268 rdma_ah_set_port_num(ah_attr, port_num);
1269 rdma_ah_set_static_rate(ah_attr, rec->rate);
1270
1271 if (sa_path_is_roce(rec)) {
1272 ret = roce_resolve_route_from_path(rec, gid_attr);
1273 if (ret)
1274 return ret;
1275
1276 memcpy(ah_attr->roce.dmac, sa_path_get_dmac(rec), ETH_ALEN);
1277 } else {
1278 rdma_ah_set_dlid(ah_attr, be32_to_cpu(sa_path_get_dlid(rec)));
1279 if (sa_path_is_opa(rec) &&
1280 rdma_ah_get_dlid(ah_attr) == be16_to_cpu(IB_LID_PERMISSIVE))
1281 rdma_ah_set_make_grd(ah_attr, true);
1282
1283 rdma_ah_set_path_bits(ah_attr,
1284 be32_to_cpu(sa_path_get_slid(rec)) &
1285 get_src_path_mask(device, port_num));
1286 }
1287
1288 if (rec->hop_limit > 0 || sa_path_is_roce(rec))
1289 ret = init_ah_attr_grh_fields(device, port_num,
1290 rec, ah_attr, gid_attr);
1291 return ret;
1292 }
1293 EXPORT_SYMBOL(ib_init_ah_attr_from_path);
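/*
 * Editor's usage sketch (assumption, not part of sa_query.c): turning a
 * resolved path record into an address handle, roughly as the CM/CMA do.
 * example_ah_from_path is a hypothetical helper.
 */
static struct ib_ah *example_ah_from_path(struct ib_pd *pd, u8 port_num,
					  struct sa_path_rec *rec)
{
	struct rdma_ah_attr ah_attr;
	struct ib_ah *ah;
	int ret;

	ret = ib_init_ah_attr_from_path(pd->device, port_num, rec,
					&ah_attr, NULL);
	if (ret)
		return ERR_PTR(ret);

	ah = rdma_create_ah(pd, &ah_attr, 0);

	/* For GRH/RoCE paths ib_init_ah_attr_from_path() left a GID-table
	 * reference in ah_attr; release it once the AH has been created. */
	rdma_destroy_ah_attr(&ah_attr);
	return ah;
}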
1294
1295 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
1296 {
1297 struct rdma_ah_attr ah_attr;
1298 unsigned long flags;
1299
1300 spin_lock_irqsave(&query->port->ah_lock, flags);
1301 if (!query->port->sm_ah) {
1302 spin_unlock_irqrestore(&query->port->ah_lock, flags);
1303 return -EAGAIN;
1304 }
1305 kref_get(&query->port->sm_ah->ref);
1306 query->sm_ah = query->port->sm_ah;
1307 spin_unlock_irqrestore(&query->port->ah_lock, flags);
1308
1309
1310
1311
1312
1313 if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
1314 !rdma_is_valid_unicast_lid(&ah_attr)) {
1315 kref_put(&query->sm_ah->ref, free_sm_ah);
1316 return -EAGAIN;
1317 }
1318 query->mad_buf = ib_create_send_mad(query->port->agent, 1,
1319 query->sm_ah->pkey_index,
1320 0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
1321 gfp_mask,
1322 ((query->flags & IB_SA_QUERY_OPA) ?
1323 OPA_MGMT_BASE_VERSION :
1324 IB_MGMT_BASE_VERSION));
1325 if (IS_ERR(query->mad_buf)) {
1326 kref_put(&query->sm_ah->ref, free_sm_ah);
1327 return -ENOMEM;
1328 }
1329
1330 query->mad_buf->ah = query->sm_ah->ah;
1331
1332 return 0;
1333 }
1334
1335 static void free_mad(struct ib_sa_query *query)
1336 {
1337 ib_free_send_mad(query->mad_buf);
1338 kref_put(&query->sm_ah->ref, free_sm_ah);
1339 }
1340
1341 static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
1342 {
1343 struct ib_sa_mad *mad = query->mad_buf->mad;
1344 unsigned long flags;
1345
1346 memset(mad, 0, sizeof *mad);
1347
1348 if (query->flags & IB_SA_QUERY_OPA) {
1349 mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
1350 mad->mad_hdr.class_version = OPA_SA_CLASS_VERSION;
1351 } else {
1352 mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
1353 mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
1354 }
1355 mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
1356 spin_lock_irqsave(&tid_lock, flags);
1357 mad->mad_hdr.tid =
1358 cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
1359 spin_unlock_irqrestore(&tid_lock, flags);
1360 }
1361
1362 static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
1363 gfp_t gfp_mask)
1364 {
1365 unsigned long flags;
1366 int ret, id;
1367
1368 xa_lock_irqsave(&queries, flags);
1369 ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
1370 xa_unlock_irqrestore(&queries, flags);
1371 if (ret < 0)
1372 return ret;
1373
1374 query->mad_buf->timeout_ms = timeout_ms;
1375 query->mad_buf->context[0] = query;
1376 query->id = id;
1377
1378 if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
1379 (!(query->flags & IB_SA_QUERY_OPA))) {
1380 if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
1381 if (!ib_nl_make_request(query, gfp_mask))
1382 return id;
1383 }
1384 ib_sa_disable_local_svc(query);
1385 }
1386
1387 ret = ib_post_send_mad(query->mad_buf, NULL);
1388 if (ret) {
1389 xa_lock_irqsave(&queries, flags);
1390 __xa_erase(&queries, id);
1391 xa_unlock_irqrestore(&queries, flags);
1392 }
1393
1394 /*
1395 * It's not safe to dereference query any more, because the
1396 * send may already have completed and freed the query in
1397 * another context.
1398 */
1399 return ret ? ret : id;
1400 }
1401
1402 void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec)
1403 {
1404 ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
1405 }
1406 EXPORT_SYMBOL(ib_sa_unpack_path);
1407
1408 void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
1409 {
1410 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
1411 }
1412 EXPORT_SYMBOL(ib_sa_pack_path);
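/*
 * Editor's illustrative sketch (not part of sa_query.c): ib_sa_pack_path()
 * and ib_sa_unpack_path() are inverses over the IB path record layout
 * described by path_rec_table, so a record survives a round trip through
 * the SA attribute payload.  example_path_roundtrip is hypothetical.
 */
static void example_path_roundtrip(struct sa_path_rec *rec)
{
	u8 wire[IB_MGMT_SA_DATA];	/* same size as mad->data */
	struct sa_path_rec copy;

	memset(wire, 0, sizeof(wire));	/* RESERVED fields are not written */
	ib_sa_pack_path(rec, wire);
	ib_sa_unpack_path(wire, &copy);
	/* copy now matches rec for every field listed in path_rec_table */
}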
1413
1414 static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
1415 struct ib_device *device,
1416 u8 port_num)
1417 {
1418 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1419 struct ib_sa_port *port;
1420 unsigned long flags;
1421 bool ret = false;
1422
1423 if (!sa_dev)
1424 return ret;
1425
1426 port = &sa_dev->port[port_num - sa_dev->start_port];
1427 spin_lock_irqsave(&port->classport_lock, flags);
1428 if (!port->classport_info.valid)
1429 goto ret;
1430
1431 if (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_OPA)
1432 ret = opa_get_cpi_capmask2(&port->classport_info.data.opa) &
1433 OPA_CLASS_PORT_INFO_PR_SUPPORT;
1434 ret:
1435 spin_unlock_irqrestore(&port->classport_lock, flags);
1436 return ret;
1437 }
1438
1439 enum opa_pr_supported {
1440 PR_NOT_SUPPORTED,
1441 PR_OPA_SUPPORTED,
1442 PR_IB_SUPPORTED
1443 };
1444
1445 /*
1446 * opa_pr_query_possible - check whether the current path record query
1447 * can be sent as an OPA query.  Returns PR_NOT_SUPPORTED if a path
1448 * record query is not possible, PR_OPA_SUPPORTED if an OPA path record
1449 * query is possible, and PR_IB_SUPPORTED if an IB path record query is
1450 * possible.
1451 */
1452 static int opa_pr_query_possible(struct ib_sa_client *client,
1453 struct ib_device *device,
1454 u8 port_num,
1455 struct sa_path_rec *rec)
1456 {
1457 struct ib_port_attr port_attr;
1458
1459 if (ib_query_port(device, port_num, &port_attr))
1460 return PR_NOT_SUPPORTED;
1461
1462 if (ib_sa_opa_pathrecord_support(client, device, port_num))
1463 return PR_OPA_SUPPORTED;
1464
1465 if (port_attr.lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
1466 return PR_NOT_SUPPORTED;
1467 else
1468 return PR_IB_SUPPORTED;
1469 }
1470
1471 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
1472 int status,
1473 struct ib_sa_mad *mad)
1474 {
1475 struct ib_sa_path_query *query =
1476 container_of(sa_query, struct ib_sa_path_query, sa_query);
1477
1478 if (mad) {
1479 struct sa_path_rec rec;
1480
1481 if (sa_query->flags & IB_SA_QUERY_OPA) {
1482 ib_unpack(opa_path_rec_table,
1483 ARRAY_SIZE(opa_path_rec_table),
1484 mad->data, &rec);
1485 rec.rec_type = SA_PATH_REC_TYPE_OPA;
1486 query->callback(status, &rec, query->context);
1487 } else {
1488 ib_unpack(path_rec_table,
1489 ARRAY_SIZE(path_rec_table),
1490 mad->data, &rec);
1491 rec.rec_type = SA_PATH_REC_TYPE_IB;
1492 sa_path_set_dmac_zero(&rec);
1493
1494 if (query->conv_pr) {
1495 struct sa_path_rec opa;
1496
1497 memset(&opa, 0, sizeof(struct sa_path_rec));
1498 sa_convert_path_ib_to_opa(&opa, &rec);
1499 query->callback(status, &opa, query->context);
1500 } else {
1501 query->callback(status, &rec, query->context);
1502 }
1503 }
1504 } else
1505 query->callback(status, NULL, query->context);
1506 }
1507
1508 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
1509 {
1510 struct ib_sa_path_query *query =
1511 container_of(sa_query, struct ib_sa_path_query, sa_query);
1512
1513 kfree(query->conv_pr);
1514 kfree(query);
1515 }
1516
1517 /**
1518 * ib_sa_path_rec_get - Start a Path get query
1519 * @client: SA client
1520 * @device: device to send query on
1521 * @port_num: port number to send query on
1522 * @rec: Path Record to send in query
1523 * @comp_mask: component mask to send in query
1524 * @timeout_ms: time to wait for response
1525 * @gfp_mask: GFP mask to use for internal allocations
1526 * @callback: function called when query completes, times out or is
1527 * canceled
1528 * @context: opaque user context passed to callback
1529 * @sa_query: query context, used to cancel query
1530 *
1531 * Send a Path Record Get query to the SA to look up a path.  The
1532 * callback function will be called when the query completes (or
1533 * fails); status is 0 for a successful response, -EINTR if the query
1534 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
1535 * occurred sending the query.  The resp parameter of the callback is
1536 * only valid if status is 0.
1537 *
1538 * If the return value of ib_sa_path_rec_get() is negative, it is an
1539 * error code.  Otherwise it is a query ID that can be used to cancel
1540 * the query.
1541 */
1542 int ib_sa_path_rec_get(struct ib_sa_client *client,
1543 struct ib_device *device, u8 port_num,
1544 struct sa_path_rec *rec,
1545 ib_sa_comp_mask comp_mask,
1546 unsigned long timeout_ms, gfp_t gfp_mask,
1547 void (*callback)(int status,
1548 struct sa_path_rec *resp,
1549 void *context),
1550 void *context,
1551 struct ib_sa_query **sa_query)
1552 {
1553 struct ib_sa_path_query *query;
1554 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1555 struct ib_sa_port *port;
1556 struct ib_mad_agent *agent;
1557 struct ib_sa_mad *mad;
1558 enum opa_pr_supported status;
1559 int ret;
1560
1561 if (!sa_dev)
1562 return -ENODEV;
1563
1564 if ((rec->rec_type != SA_PATH_REC_TYPE_IB) &&
1565 (rec->rec_type != SA_PATH_REC_TYPE_OPA))
1566 return -EINVAL;
1567
1568 port = &sa_dev->port[port_num - sa_dev->start_port];
1569 agent = port->agent;
1570
1571 query = kzalloc(sizeof(*query), gfp_mask);
1572 if (!query)
1573 return -ENOMEM;
1574
1575 query->sa_query.port = port;
1576 if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
1577 status = opa_pr_query_possible(client, device, port_num, rec);
1578 if (status == PR_NOT_SUPPORTED) {
1579 ret = -EINVAL;
1580 goto err1;
1581 } else if (status == PR_OPA_SUPPORTED) {
1582 query->sa_query.flags |= IB_SA_QUERY_OPA;
1583 } else {
1584 query->conv_pr =
1585 kmalloc(sizeof(*query->conv_pr), gfp_mask);
1586 if (!query->conv_pr) {
1587 ret = -ENOMEM;
1588 goto err1;
1589 }
1590 }
1591 }
1592
1593 ret = alloc_mad(&query->sa_query, gfp_mask);
1594 if (ret)
1595 goto err2;
1596
1597 ib_sa_client_get(client);
1598 query->sa_query.client = client;
1599 query->callback = callback;
1600 query->context = context;
1601
1602 mad = query->sa_query.mad_buf->mad;
1603 init_mad(&query->sa_query, agent);
1604
1605 query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
1606 query->sa_query.release = ib_sa_path_rec_release;
1607 mad->mad_hdr.method = IB_MGMT_METHOD_GET;
1608 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
1609 mad->sa_hdr.comp_mask = comp_mask;
1610
1611 if (query->sa_query.flags & IB_SA_QUERY_OPA) {
1612 ib_pack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
1613 rec, mad->data);
1614 } else if (query->conv_pr) {
1615 sa_convert_path_opa_to_ib(query->conv_pr, rec);
1616 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
1617 query->conv_pr, mad->data);
1618 } else {
1619 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
1620 rec, mad->data);
1621 }
1622
1623 *sa_query = &query->sa_query;
1624
1625 query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
1626 query->sa_query.mad_buf->context[1] = (query->conv_pr) ?
1627 query->conv_pr : rec;
1628
1629 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1630 if (ret < 0)
1631 goto err3;
1632
1633 return ret;
1634
1635 err3:
1636 *sa_query = NULL;
1637 ib_sa_client_put(query->sa_query.client);
1638 free_mad(&query->sa_query);
1639 err2:
1640 kfree(query->conv_pr);
1641 err1:
1642 kfree(query);
1643 return ret;
1644 }
1645 EXPORT_SYMBOL(ib_sa_path_rec_get);
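/*
 * Editor's usage sketch (assumption, not part of sa_query.c): an
 * asynchronous path lookup.  rec must have rec_type set
 * (SA_PATH_REC_TYPE_IB or SA_PATH_REC_TYPE_OPA) and the fields named by the
 * component mask filled in.  Names prefixed "example_" are hypothetical;
 * example_sa_client is the client registered in the sketch above.
 */
static void example_path_done(int status, struct sa_path_rec *resp, void *ctx)
{
	if (status) {
		pr_debug("path query failed: %d\n", status);
		return;
	}
	/* resp is only valid for the duration of the callback; copy out
	 * whatever is needed here. */
}

static int example_resolve_path(struct ib_device *dev, u8 port_num,
				struct sa_path_rec *rec)
{
	struct ib_sa_query *query;
	int id;

	id = ib_sa_path_rec_get(&example_sa_client, dev, port_num, rec,
				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
				IB_SA_PATH_REC_NUMB_PATH |
				IB_SA_PATH_REC_PKEY,
				2000, GFP_KERNEL,
				example_path_done, NULL, &query);
	if (id < 0)
		return id;

	/* id and query can later be handed to ib_sa_cancel_query(). */
	return 0;
}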
1646
1647 static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
1648 int status,
1649 struct ib_sa_mad *mad)
1650 {
1651 struct ib_sa_service_query *query =
1652 container_of(sa_query, struct ib_sa_service_query, sa_query);
1653
1654 if (mad) {
1655 struct ib_sa_service_rec rec;
1656
1657 ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
1658 mad->data, &rec);
1659 query->callback(status, &rec, query->context);
1660 } else
1661 query->callback(status, NULL, query->context);
1662 }
1663
1664 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
1665 {
1666 kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
1667 }
1668
1669 /**
1670 * ib_sa_service_rec_query - Start a Service Record operation
1671 * @client: SA client
1672 * @device: device to send request on
1673 * @port_num: port number to send request on
1674 * @method: SA method - should be get, set, or delete
1675 * @rec: Service Record to send in request
1676 * @comp_mask: component mask to send in request
1677 * @timeout_ms: time to wait for response
1678 * @gfp_mask: GFP mask to use for internal allocations
1679 * @callback: function called when request completes, times out or is
1680 * canceled
1681 * @context: opaque user context passed to callback
1682 * @sa_query: request context, used to cancel request
1683 *
1684 * Send a Service Record set/get/delete to the SA to register,
1685 * unregister or query a service record.
1686 * The callback function will be called when the request completes (or
1687 * fails); status is 0 for a successful response, -EINTR if the request
1688 * is canceled, -ETIMEDOUT if the request timed out, or -EIO if an error
1689 * occurred sending the request.  The resp parameter of the callback is
1690 * only valid if status is 0.
1691 *
1692 * If the return value of ib_sa_service_rec_query() is negative, it is an
1693 * error code.  Otherwise it is a request ID that can be used to cancel
1694 * the request.
1695 */
1696 int ib_sa_service_rec_query(struct ib_sa_client *client,
1697 struct ib_device *device, u8 port_num, u8 method,
1698 struct ib_sa_service_rec *rec,
1699 ib_sa_comp_mask comp_mask,
1700 unsigned long timeout_ms, gfp_t gfp_mask,
1701 void (*callback)(int status,
1702 struct ib_sa_service_rec *resp,
1703 void *context),
1704 void *context,
1705 struct ib_sa_query **sa_query)
1706 {
1707 struct ib_sa_service_query *query;
1708 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1709 struct ib_sa_port *port;
1710 struct ib_mad_agent *agent;
1711 struct ib_sa_mad *mad;
1712 int ret;
1713
1714 if (!sa_dev)
1715 return -ENODEV;
1716
1717 port = &sa_dev->port[port_num - sa_dev->start_port];
1718 agent = port->agent;
1719
1720 if (method != IB_MGMT_METHOD_GET &&
1721 method != IB_MGMT_METHOD_SET &&
1722 method != IB_SA_METHOD_DELETE)
1723 return -EINVAL;
1724
1725 query = kzalloc(sizeof(*query), gfp_mask);
1726 if (!query)
1727 return -ENOMEM;
1728
1729 query->sa_query.port = port;
1730 ret = alloc_mad(&query->sa_query, gfp_mask);
1731 if (ret)
1732 goto err1;
1733
1734 ib_sa_client_get(client);
1735 query->sa_query.client = client;
1736 query->callback = callback;
1737 query->context = context;
1738
1739 mad = query->sa_query.mad_buf->mad;
1740 init_mad(&query->sa_query, agent);
1741
1742 query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
1743 query->sa_query.release = ib_sa_service_rec_release;
1744 mad->mad_hdr.method = method;
1745 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
1746 mad->sa_hdr.comp_mask = comp_mask;
1747
1748 ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
1749 rec, mad->data);
1750
1751 *sa_query = &query->sa_query;
1752
1753 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1754 if (ret < 0)
1755 goto err2;
1756
1757 return ret;
1758
1759 err2:
1760 *sa_query = NULL;
1761 ib_sa_client_put(query->sa_query.client);
1762 free_mad(&query->sa_query);
1763
1764 err1:
1765 kfree(query);
1766 return ret;
1767 }
1768 EXPORT_SYMBOL(ib_sa_service_rec_query);
1769
1770 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
1771 int status,
1772 struct ib_sa_mad *mad)
1773 {
1774 struct ib_sa_mcmember_query *query =
1775 container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
1776
1777 if (mad) {
1778 struct ib_sa_mcmember_rec rec;
1779
1780 ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1781 mad->data, &rec);
1782 query->callback(status, &rec, query->context);
1783 } else
1784 query->callback(status, NULL, query->context);
1785 }
1786
1787 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
1788 {
1789 kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
1790 }
1791
1792 int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
1793 struct ib_device *device, u8 port_num,
1794 u8 method,
1795 struct ib_sa_mcmember_rec *rec,
1796 ib_sa_comp_mask comp_mask,
1797 unsigned long timeout_ms, gfp_t gfp_mask,
1798 void (*callback)(int status,
1799 struct ib_sa_mcmember_rec *resp,
1800 void *context),
1801 void *context,
1802 struct ib_sa_query **sa_query)
1803 {
1804 struct ib_sa_mcmember_query *query;
1805 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1806 struct ib_sa_port *port;
1807 struct ib_mad_agent *agent;
1808 struct ib_sa_mad *mad;
1809 int ret;
1810
1811 if (!sa_dev)
1812 return -ENODEV;
1813
1814 port = &sa_dev->port[port_num - sa_dev->start_port];
1815 agent = port->agent;
1816
1817 query = kzalloc(sizeof(*query), gfp_mask);
1818 if (!query)
1819 return -ENOMEM;
1820
1821 query->sa_query.port = port;
1822 ret = alloc_mad(&query->sa_query, gfp_mask);
1823 if (ret)
1824 goto err1;
1825
1826 ib_sa_client_get(client);
1827 query->sa_query.client = client;
1828 query->callback = callback;
1829 query->context = context;
1830
1831 mad = query->sa_query.mad_buf->mad;
1832 init_mad(&query->sa_query, agent);
1833
1834 query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
1835 query->sa_query.release = ib_sa_mcmember_rec_release;
1836 mad->mad_hdr.method = method;
1837 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
1838 mad->sa_hdr.comp_mask = comp_mask;
1839
1840 ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1841 rec, mad->data);
1842
1843 *sa_query = &query->sa_query;
1844
1845 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1846 if (ret < 0)
1847 goto err2;
1848
1849 return ret;
1850
1851 err2:
1852 *sa_query = NULL;
1853 ib_sa_client_put(query->sa_query.client);
1854 free_mad(&query->sa_query);
1855
1856 err1:
1857 kfree(query);
1858 return ret;
1859 }
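/*
 * Editor's usage sketch (assumption, not part of sa_query.c): unlike the
 * other query helpers, ib_sa_mcmember_rec_query() carries no EXPORT_SYMBOL
 * in this listing; it is driven through sa.h by the core multicast code,
 * roughly as below, with IB_MGMT_METHOD_SET for a join and
 * IB_SA_METHOD_DELETE for a leave.  Names prefixed "example_" are
 * hypothetical.
 */
static int example_mcast_join(struct ib_device *dev, u8 port_num,
			      struct ib_sa_mcmember_rec *rec,
			      void (*done)(int status,
					   struct ib_sa_mcmember_rec *resp,
					   void *context),
			      void *ctx, struct ib_sa_query **query)
{
	ib_sa_comp_mask mask = IB_SA_MCMEMBER_REC_MGID |
			       IB_SA_MCMEMBER_REC_PORT_GID |
			       IB_SA_MCMEMBER_REC_JOIN_STATE;

	return ib_sa_mcmember_rec_query(&example_sa_client, dev, port_num,
					IB_MGMT_METHOD_SET, rec, mask,
					3000, GFP_KERNEL, done, ctx, query);
}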
1860
1861
1862 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
1863 int status,
1864 struct ib_sa_mad *mad)
1865 {
1866 struct ib_sa_guidinfo_query *query =
1867 container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
1868
1869 if (mad) {
1870 struct ib_sa_guidinfo_rec rec;
1871
1872 ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
1873 mad->data, &rec);
1874 query->callback(status, &rec, query->context);
1875 } else
1876 query->callback(status, NULL, query->context);
1877 }
1878
1879 static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
1880 {
1881 kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
1882 }
1883
1884 int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
1885 struct ib_device *device, u8 port_num,
1886 struct ib_sa_guidinfo_rec *rec,
1887 ib_sa_comp_mask comp_mask, u8 method,
1888 unsigned long timeout_ms, gfp_t gfp_mask,
1889 void (*callback)(int status,
1890 struct ib_sa_guidinfo_rec *resp,
1891 void *context),
1892 void *context,
1893 struct ib_sa_query **sa_query)
1894 {
1895 struct ib_sa_guidinfo_query *query;
1896 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1897 struct ib_sa_port *port;
1898 struct ib_mad_agent *agent;
1899 struct ib_sa_mad *mad;
1900 int ret;
1901
1902 if (!sa_dev)
1903 return -ENODEV;
1904
1905 if (method != IB_MGMT_METHOD_GET &&
1906 method != IB_MGMT_METHOD_SET &&
1907 method != IB_SA_METHOD_DELETE) {
1908 return -EINVAL;
1909 }
1910
1911 port = &sa_dev->port[port_num - sa_dev->start_port];
1912 agent = port->agent;
1913
1914 query = kzalloc(sizeof(*query), gfp_mask);
1915 if (!query)
1916 return -ENOMEM;
1917
1918 query->sa_query.port = port;
1919 ret = alloc_mad(&query->sa_query, gfp_mask);
1920 if (ret)
1921 goto err1;
1922
1923 ib_sa_client_get(client);
1924 query->sa_query.client = client;
1925 query->callback = callback;
1926 query->context = context;
1927
1928 mad = query->sa_query.mad_buf->mad;
1929 init_mad(&query->sa_query, agent);
1930
1931 query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
1932 query->sa_query.release = ib_sa_guidinfo_rec_release;
1933
1934 mad->mad_hdr.method = method;
1935 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
1936 mad->sa_hdr.comp_mask = comp_mask;
1937
1938 ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
1939 mad->data);
1940
1941 *sa_query = &query->sa_query;
1942
1943 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1944 if (ret < 0)
1945 goto err2;
1946
1947 return ret;
1948
1949 err2:
1950 *sa_query = NULL;
1951 ib_sa_client_put(query->sa_query.client);
1952 free_mad(&query->sa_query);
1953
1954 err1:
1955 kfree(query);
1956 return ret;
1957 }
1958 EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
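/*
 * Editor's usage sketch (assumption, not part of sa_query.c): setting a
 * GUIDInfo block with the SA, as an SR-IOV alias-GUID implementation might.
 * Names prefixed "example_" are hypothetical; example_sa_client is the
 * client from the earlier sketch.
 */
static void example_guid_done(int status, struct ib_sa_guidinfo_rec *resp,
			      void *context)
{
	pr_debug("guidinfo set completed: %d\n", status);
}

static int example_set_guid_block(struct ib_device *dev, u8 port_num,
				  struct ib_sa_guidinfo_rec *rec)
{
	struct ib_sa_query *query;
	int id;

	id = ib_sa_guid_info_rec_query(&example_sa_client, dev, port_num, rec,
				       IB_SA_GUIDINFO_REC_LID |
				       IB_SA_GUIDINFO_REC_BLOCK_NUM |
				       IB_SA_GUIDINFO_REC_GID0,
				       IB_MGMT_METHOD_SET, 1000, GFP_KERNEL,
				       example_guid_done, NULL, &query);
	return id < 0 ? id : 0;
}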
1959
1960 bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
1961 struct ib_device *device,
1962 u8 port_num)
1963 {
1964 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1965 struct ib_sa_port *port;
1966 bool ret = false;
1967 unsigned long flags;
1968
1969 if (!sa_dev)
1970 return ret;
1971
1972 port = &sa_dev->port[port_num - sa_dev->start_port];
1973
1974 spin_lock_irqsave(&port->classport_lock, flags);
1975 if ((port->classport_info.valid) &&
1976 (port->classport_info.data.type == RDMA_CLASS_PORT_INFO_IB))
1977 ret = ib_get_cpi_capmask2(&port->classport_info.data.ib)
1978 & IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT;
1979 spin_unlock_irqrestore(&port->classport_lock, flags);
1980 return ret;
1981 }
1982 EXPORT_SYMBOL(ib_sa_sendonly_fullmem_support);
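/*
 * Editor's usage sketch (assumption, not part of sa_query.c): a multicast
 * consumer can consult this capability before requesting a send-only
 * full-member join.  The values used here are the MCMemberRecord JoinState
 * bits (0x8 = SendOnlyFullMember, 0x4 = SendOnlyNonMember);
 * example_sendonly_join_state is hypothetical.
 */
static u8 example_sendonly_join_state(struct ib_device *dev, u8 port_num)
{
	if (ib_sa_sendonly_fullmem_support(&example_sa_client, dev, port_num))
		return 0x8;	/* SendOnlyFullMember */
	return 0x4;		/* SendOnlyNonMember */
}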
1983
1984 struct ib_classport_info_context {
1985 struct completion done;
1986 struct ib_sa_query *sa_query;
1987 };
1988
1989 static void ib_classportinfo_cb(void *context)
1990 {
1991 struct ib_classport_info_context *cb_ctx = context;
1992
1993 complete(&cb_ctx->done);
1994 }
1995
1996 static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
1997 int status,
1998 struct ib_sa_mad *mad)
1999 {
2000 unsigned long flags;
2001 struct ib_sa_classport_info_query *query =
2002 container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
2003 struct ib_sa_classport_cache *info = &sa_query->port->classport_info;
2004
2005 if (mad) {
2006 if (sa_query->flags & IB_SA_QUERY_OPA) {
2007 struct opa_class_port_info rec;
2008
2009 ib_unpack(opa_classport_info_rec_table,
2010 ARRAY_SIZE(opa_classport_info_rec_table),
2011 mad->data, &rec);
2012
2013 spin_lock_irqsave(&sa_query->port->classport_lock,
2014 flags);
2015 if (!status && !info->valid) {
2016 memcpy(&info->data.opa, &rec,
2017 sizeof(info->data.opa));
2018
2019 info->valid = true;
2020 info->data.type = RDMA_CLASS_PORT_INFO_OPA;
2021 }
2022 spin_unlock_irqrestore(&sa_query->port->classport_lock,
2023 flags);
2024
2025 } else {
2026 struct ib_class_port_info rec;
2027
2028 ib_unpack(ib_classport_info_rec_table,
2029 ARRAY_SIZE(ib_classport_info_rec_table),
2030 mad->data, &rec);
2031
2032 spin_lock_irqsave(&sa_query->port->classport_lock,
2033 flags);
2034 if (!status && !info->valid) {
2035 memcpy(&info->data.ib, &rec,
2036 sizeof(info->data.ib));
2037
2038 info->valid = true;
2039 info->data.type = RDMA_CLASS_PORT_INFO_IB;
2040 }
2041 spin_unlock_irqrestore(&sa_query->port->classport_lock,
2042 flags);
2043 }
2044 }
2045 query->callback(query->context);
2046 }
2047
2048 static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
2049 {
2050 kfree(container_of(sa_query, struct ib_sa_classport_info_query,
2051 sa_query));
2052 }
2053
2054 static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
2055 unsigned long timeout_ms,
2056 void (*callback)(void *context),
2057 void *context,
2058 struct ib_sa_query **sa_query)
2059 {
2060 struct ib_mad_agent *agent;
2061 struct ib_sa_classport_info_query *query;
2062 struct ib_sa_mad *mad;
2063 gfp_t gfp_mask = GFP_KERNEL;
2064 int ret;
2065
2066 agent = port->agent;
2067
2068 query = kzalloc(sizeof(*query), gfp_mask);
2069 if (!query)
2070 return -ENOMEM;
2071
2072 query->sa_query.port = port;
2073 query->sa_query.flags |= rdma_cap_opa_ah(port->agent->device,
2074 port->port_num) ?
2075 IB_SA_QUERY_OPA : 0;
2076 ret = alloc_mad(&query->sa_query, gfp_mask);
2077 if (ret)
2078 goto err_free;
2079
2080 query->callback = callback;
2081 query->context = context;
2082
2083 mad = query->sa_query.mad_buf->mad;
2084 init_mad(&query->sa_query, agent);
2085
2086 query->sa_query.callback = ib_sa_classport_info_rec_callback;
2087 query->sa_query.release = ib_sa_classport_info_rec_release;
2088 mad->mad_hdr.method = IB_MGMT_METHOD_GET;
2089 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
2090 mad->sa_hdr.comp_mask = 0;
2091 *sa_query = &query->sa_query;
2092
2093 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
2094 if (ret < 0)
2095 goto err_free_mad;
2096
2097 return ret;
2098
2099 err_free_mad:
2100 *sa_query = NULL;
2101 free_mad(&query->sa_query);
2102
2103 err_free:
2104 kfree(query);
2105 return ret;
2106 }
2107
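/*
 * Delayed work that (re)fetches the ClassPortInfo for a port. The query is
 * issued synchronously via a completion; if the cache is still invalid
 * afterwards the work requeues itself, up to IB_SA_CPI_MAX_RETRY_CNT
 * attempts spaced IB_SA_CPI_RETRY_WAIT milliseconds apart.
 */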
2108 static void update_ib_cpi(struct work_struct *work)
2109 {
2110 struct ib_sa_port *port =
2111 container_of(work, struct ib_sa_port, ib_cpi_work.work);
2112 struct ib_classport_info_context *cb_context;
2113 unsigned long flags;
2114 int ret;
2115
2116 /* If the cached classport info is already valid, there is
2117  * nothing to do here.
2118  */
2119 spin_lock_irqsave(&port->classport_lock, flags);
2120 if (port->classport_info.valid) {
2121 spin_unlock_irqrestore(&port->classport_lock, flags);
2122 return;
2123 }
2124 spin_unlock_irqrestore(&port->classport_lock, flags);
2125
2126 cb_context = kmalloc(sizeof(*cb_context), GFP_KERNEL);
2127 if (!cb_context)
2128 goto err_nomem;
2129
2130 init_completion(&cb_context->done);
2131
2132 ret = ib_sa_classport_info_rec_query(port, 3000,
2133 ib_classportinfo_cb, cb_context,
2134 &cb_context->sa_query);
2135 if (ret < 0)
2136 goto free_cb_err;
2137 wait_for_completion(&cb_context->done);
2138 free_cb_err:
2139 kfree(cb_context);
2140 spin_lock_irqsave(&port->classport_lock, flags);
2141
2142 /* If the classport info is still not valid, schedule the query
2143  * again, up to IB_SA_CPI_MAX_RETRY_CNT attempts.
2144  */
2145 if (!port->classport_info.valid) {
2146 port->classport_info.retry_cnt++;
2147 if (port->classport_info.retry_cnt <=
2148 IB_SA_CPI_MAX_RETRY_CNT) {
2149 unsigned long delay =
2150 msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2151
2152 queue_delayed_work(ib_wq, &port->ib_cpi_work, delay);
2153 }
2154 }
2155 spin_unlock_irqrestore(&port->classport_lock, flags);
2156
2157 err_nomem:
2158 return;
2159 }
2160
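/*
 * MAD send completion handler: map the work completion status to an errno
 * for the query callback (a successful send defers to the receive path),
 * then remove the query from the XArray and release its resources.
 */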
2161 static void send_handler(struct ib_mad_agent *agent,
2162 struct ib_mad_send_wc *mad_send_wc)
2163 {
2164 struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
2165 unsigned long flags;
2166
2167 if (query->callback)
2168 switch (mad_send_wc->status) {
2169 case IB_WC_SUCCESS:
2170 /* No callback -- already got recv */
2171 break;
2172 case IB_WC_RESP_TIMEOUT_ERR:
2173 query->callback(query, -ETIMEDOUT, NULL);
2174 break;
2175 case IB_WC_WR_FLUSH_ERR:
2176 query->callback(query, -EINTR, NULL);
2177 break;
2178 default:
2179 query->callback(query, -EIO, NULL);
2180 break;
2181 }
2182
2183 xa_lock_irqsave(&queries, flags);
2184 __xa_erase(&queries, query->id);
2185 xa_unlock_irqrestore(&queries, flags);
2186
2187 free_mad(query);
2188 if (query->client)
2189 ib_sa_client_put(query->client);
2190 query->release(query);
2191 }
2192
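/*
 * MAD receive handler: match the response to the originating query through
 * the send buffer context and hand the SA MAD (or an error) to the query
 * callback before freeing the received MAD.
 */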
2193 static void recv_handler(struct ib_mad_agent *mad_agent,
2194 struct ib_mad_send_buf *send_buf,
2195 struct ib_mad_recv_wc *mad_recv_wc)
2196 {
2197 struct ib_sa_query *query;
2198
2199 if (!send_buf)
2200 return;
2201
2202 query = send_buf->context[0];
2203 if (query->callback) {
2204 if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
2205 query->callback(query,
2206 mad_recv_wc->recv_buf.mad->mad_hdr.status ?
2207 -EINVAL : 0,
2208 (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
2209 else
2210 query->callback(query, -EIO, NULL);
2211 }
2212
2213 ib_free_recv_mad(mad_recv_wc);
2214 }
2215
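/*
 * Rebuild the cached address handle used to reach the subnet manager: query
 * the port attributes, create an AH towards the SM LID/SL and swap it in
 * under ah_lock, dropping the reference on any previous SM AH.
 */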
2216 static void update_sm_ah(struct work_struct *work)
2217 {
2218 struct ib_sa_port *port =
2219 container_of(work, struct ib_sa_port, update_task);
2220 struct ib_sa_sm_ah *new_ah;
2221 struct ib_port_attr port_attr;
2222 struct rdma_ah_attr ah_attr;
2223 bool grh_required;
2224
2225 if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
2226 pr_warn("Couldn't query port\n");
2227 return;
2228 }
2229
2230 new_ah = kmalloc(sizeof(*new_ah), GFP_KERNEL);
2231 if (!new_ah)
2232 return;
2233
2234 kref_init(&new_ah->ref);
2235 new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
2236
2237 new_ah->pkey_index = 0;
2238 if (ib_find_pkey(port->agent->device, port->port_num,
2239 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
2240 pr_err("Couldn't find index for default PKey\n");
2241
2242 memset(&ah_attr, 0, sizeof(ah_attr));
2243 ah_attr.type = rdma_ah_find_type(port->agent->device,
2244 port->port_num);
2245 rdma_ah_set_dlid(&ah_attr, port_attr.sm_lid);
2246 rdma_ah_set_sl(&ah_attr, port_attr.sm_sl);
2247 rdma_ah_set_port_num(&ah_attr, port->port_num);
2248
2249 grh_required = rdma_is_grh_required(port->agent->device,
2250 port->port_num);
2251
2252 /*
2253  * The OPA sm_lid of 0xFFFF needs special handling so that it can be
2254  * differentiated from a permissive LID of 0xFFFF. We set the
2255  * grh_required flag here so the SA can program the DGID in the
2256  * address handle appropriately.
2257  */
2258 if (ah_attr.type == RDMA_AH_ATTR_TYPE_OPA &&
2259 (grh_required ||
2260 port_attr.sm_lid == be16_to_cpu(IB_LID_PERMISSIVE)))
2261 rdma_ah_set_make_grd(&ah_attr, true);
2262
2263 if (ah_attr.type == RDMA_AH_ATTR_TYPE_IB && grh_required) {
2264 rdma_ah_set_ah_flags(&ah_attr, IB_AH_GRH);
2265 rdma_ah_set_subnet_prefix(&ah_attr,
2266 cpu_to_be64(port_attr.subnet_prefix));
2267 rdma_ah_set_interface_id(&ah_attr,
2268 cpu_to_be64(IB_SA_WELL_KNOWN_GUID));
2269 }
2270
2271 new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr,
2272 RDMA_CREATE_AH_SLEEPABLE);
2273 if (IS_ERR(new_ah->ah)) {
2274 pr_warn("Couldn't create new SM AH\n");
2275 kfree(new_ah);
2276 return;
2277 }
2278
2279 spin_lock_irq(&port->ah_lock);
2280 if (port->sm_ah)
2281 kref_put(&port->sm_ah->ref, free_sm_ah);
2282 port->sm_ah = new_ah;
2283 spin_unlock_irq(&port->ah_lock);
2284 }
2285
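/*
 * Asynchronous event handler. Port, LID, P_Key, SM-change and
 * client-reregister events drop the cached SM AH and queue update_task to
 * rebuild it; SM-change, reregister, LID-change and port-active events also
 * invalidate the ClassPortInfo cache and schedule its refresh.
 */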
2286 static void ib_sa_event(struct ib_event_handler *handler,
2287 struct ib_event *event)
2288 {
2289 if (event->event == IB_EVENT_PORT_ERR ||
2290 event->event == IB_EVENT_PORT_ACTIVE ||
2291 event->event == IB_EVENT_LID_CHANGE ||
2292 event->event == IB_EVENT_PKEY_CHANGE ||
2293 event->event == IB_EVENT_SM_CHANGE ||
2294 event->event == IB_EVENT_CLIENT_REREGISTER) {
2295 unsigned long flags;
2296 struct ib_sa_device *sa_dev =
2297 container_of(handler, typeof(*sa_dev), event_handler);
2298 u8 port_num = event->element.port_num - sa_dev->start_port;
2299 struct ib_sa_port *port = &sa_dev->port[port_num];
2300
2301 if (!rdma_cap_ib_sa(handler->device, port->port_num))
2302 return;
2303
2304 spin_lock_irqsave(&port->ah_lock, flags);
2305 if (port->sm_ah)
2306 kref_put(&port->sm_ah->ref, free_sm_ah);
2307 port->sm_ah = NULL;
2308 spin_unlock_irqrestore(&port->ah_lock, flags);
2309
2310 if (event->event == IB_EVENT_SM_CHANGE ||
2311 event->event == IB_EVENT_CLIENT_REREGISTER ||
2312 event->event == IB_EVENT_LID_CHANGE ||
2313 event->event == IB_EVENT_PORT_ACTIVE) {
2314 unsigned long delay =
2315 msecs_to_jiffies(IB_SA_CPI_RETRY_WAIT);
2316
2317 spin_lock_irqsave(&port->classport_lock, flags);
2318 port->classport_info.valid = false;
2319 port->classport_info.retry_cnt = 0;
2320 spin_unlock_irqrestore(&port->classport_lock, flags);
2321 queue_delayed_work(ib_wq,
2322 &port->ib_cpi_work, delay);
2323 }
2324 queue_work(ib_wq, &sa_dev->port[port_num].update_task);
2325 }
2326 }
2327
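/*
 * Device-add callback for the SA client: allocate per-port state, register
 * a GSI MAD agent on every port that supports the IB SA, then hook up the
 * event handler and prime each port's SM AH cache.
 */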
2328 static void ib_sa_add_one(struct ib_device *device)
2329 {
2330 struct ib_sa_device *sa_dev;
2331 int s, e, i;
2332 int count = 0;
2333
2334 s = rdma_start_port(device);
2335 e = rdma_end_port(device);
2336
2337 sa_dev = kzalloc(struct_size(sa_dev, port, e - s + 1), GFP_KERNEL);
2338 if (!sa_dev)
2339 return;
2340
2341 sa_dev->start_port = s;
2342 sa_dev->end_port = e;
2343
2344 for (i = 0; i <= e - s; ++i) {
2345 spin_lock_init(&sa_dev->port[i].ah_lock);
2346 if (!rdma_cap_ib_sa(device, i + 1))
2347 continue;
2348
2349 sa_dev->port[i].sm_ah = NULL;
2350 sa_dev->port[i].port_num = i + s;
2351
2352 spin_lock_init(&sa_dev->port[i].classport_lock);
2353 sa_dev->port[i].classport_info.valid = false;
2354
2355 sa_dev->port[i].agent =
2356 ib_register_mad_agent(device, i + s, IB_QPT_GSI,
2357 NULL, 0, send_handler,
2358 recv_handler, sa_dev, 0);
2359 if (IS_ERR(sa_dev->port[i].agent))
2360 goto err;
2361
2362 INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
2363 INIT_DELAYED_WORK(&sa_dev->port[i].ib_cpi_work,
2364 update_ib_cpi);
2365
2366 count++;
2367 }
2368
2369 if (!count)
2370 goto free;
2371
2372 ib_set_client_data(device, &sa_client, sa_dev);
2373
2374 /*
2375  * Register the event handler before priming each port's SM AH cache
2376  * below. If a port event fires while the initial update_sm_ah() is
2377  * still running, the handler simply requeues update_task, so the
2378  * cached SM AH cannot go stale; the opposite order could miss an SM
2379  * change between the initial update and registration.
2380  */
2381 INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
2382 ib_register_event_handler(&sa_dev->event_handler);
2383
2384 for (i = 0; i <= e - s; ++i) {
2385 if (rdma_cap_ib_sa(device, i + 1))
2386 update_sm_ah(&sa_dev->port[i].update_task);
2387 }
2388
2389 return;
2390
2391 err:
2392 while (--i >= 0) {
2393 if (rdma_cap_ib_sa(device, i + 1))
2394 ib_unregister_mad_agent(sa_dev->port[i].agent);
2395 }
2396 free:
2397 kfree(sa_dev);
2398 return;
2399 }
2400
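/*
 * Device-removal callback: unregister the event handler, flush pending
 * work, cancel the ClassPortInfo refresh, unregister each port's MAD agent
 * and drop the cached SM AH references.
 */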
2401 static void ib_sa_remove_one(struct ib_device *device, void *client_data)
2402 {
2403 struct ib_sa_device *sa_dev = client_data;
2404 int i;
2405
2406 if (!sa_dev)
2407 return;
2408
2409 ib_unregister_event_handler(&sa_dev->event_handler);
2410 flush_workqueue(ib_wq);
2411
2412 for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
2413 if (rdma_cap_ib_sa(device, i + 1)) {
2414 cancel_delayed_work_sync(&sa_dev->port[i].ib_cpi_work);
2415 ib_unregister_mad_agent(sa_dev->port[i].agent);
2416 if (sa_dev->port[i].sm_ah)
2417 kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
2418 }
2419
2420 }
2421
2422 kfree(sa_dev);
2423 }
2424
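/*
 * Module init for the SA layer: seed the MAD transaction ID, register the
 * SA client with the core, bring up multicast handling and create the
 * ordered workqueue that backs SA netlink requests and their timeout work.
 */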
2425 int ib_sa_init(void)
2426 {
2427 int ret;
2428
2429 get_random_bytes(&tid, sizeof tid);
2430
2431 atomic_set(&ib_nl_sa_request_seq, 0);
2432
2433 ret = ib_register_client(&sa_client);
2434 if (ret) {
2435 pr_err("Couldn't register ib_sa client\n");
2436 goto err1;
2437 }
2438
2439 ret = mcast_init();
2440 if (ret) {
2441 pr_err("Couldn't initialize multicast handling\n");
2442 goto err2;
2443 }
2444
2445 ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
2446 if (!ib_nl_wq) {
2447 ret = -ENOMEM;
2448 goto err3;
2449 }
2450
2451 INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
2452
2453 return 0;
2454
2455 err3:
2456 mcast_cleanup();
2457 err2:
2458 ib_unregister_client(&sa_client);
2459 err1:
2460 return ret;
2461 }
2462
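/*
 * Module teardown: stop the netlink timeout work and its workqueue, shut
 * down multicast handling, unregister the SA client and warn if any
 * queries are still outstanding.
 */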
2463 void ib_sa_cleanup(void)
2464 {
2465 cancel_delayed_work(&ib_nl_timed_work);
2466 flush_workqueue(ib_nl_wq);
2467 destroy_workqueue(ib_nl_wq);
2468 mcast_cleanup();
2469 ib_unregister_client(&sa_client);
2470 WARN_ON(!xa_empty(&queries));
2471 }