/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include "sa.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN	100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT	2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX	200000
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref ref;
	u16 pkey_index;
	u8 src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah *sm_ah;
	struct work_struct update_task;
	spinlock_t ah_lock;
	u8 port_num;
};

struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client *client;
	struct ib_sa_port *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah *sm_ah;
	int id;
	u32 flags;
	struct list_head list;		/* Local svc request list */
	u32 seq;			/* Local svc request sequence number */
	unsigned long timeout;		/* Local svc timeout */
	u8 path_use;			/* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};
struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
					   .len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
					   .len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;
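/*
 * Note on the field tables below (added as a descriptive comment): each
 * entry drives ib_pack()/ib_unpack(), mapping one member of the in-memory
 * record structure (struct_offset_bytes/struct_size_bytes) to its position
 * in the on-the-wire SA attribute (offset_words/offset_bits/size_bits,
 * counted from the start of the attribute). RESERVED entries only describe
 * padding on the wire and have no corresponding structure field.
 */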
#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}
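/*
 * For reference, a sketch of the netlink message built below (derived from
 * the attribute helpers in this file, not a normative protocol description):
 *
 *	struct nlmsghdr			RDMA_NL_LS / RDMA_NL_LS_OP_RESOLVE
 *	struct rdma_ls_resolve_header	device_name, port_num, path_use
 *	nlattr LS_NLA_TYPE_SERVICE_ID	u64, host order		(optional)
 *	nlattr LS_NLA_TYPE_DGID		16-byte GID		(optional)
 *	nlattr LS_NLA_TYPE_SGID		16-byte GID		(optional)
 *	nlattr LS_NLA_TYPE_TCLASS	u8			(optional)
 *	nlattr LS_NLA_TYPE_PKEY		u16, host order		(optional)
 *	nlattr LS_NLA_TYPE_QOS_CLASS	u16, host order		(optional)
 *
 * Each attribute is emitted only when the corresponding component mask bit
 * is set in the SA MAD header, and every attribute carries
 * RDMA_NLA_F_MANDATORY.
 */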
static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first. */
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one. In the future, we may
				 * need to get up to 6 pathrecords.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
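/*
 * Descriptive note (a sketch inferred from the parsing below, not a protocol
 * specification): an RDMA_NL_LS_OP_SET_TIMEOUT request is expected to carry a
 * single LS_NLA_TYPE_TIMEOUT attribute with the new local service timeout in
 * milliseconds. Values are clamped to the
 * [IB_SA_LOCAL_SVC_TIMEOUT_MIN, IB_SA_LOCAL_SVC_TIMEOUT_MAX] range, and any
 * queries already on the request list have their deadlines shifted by the
 * difference between the old and new timeouts.
 */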
static int ib_nl_handle_set_timeout(struct sk_buff *skb,
				    struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	if (ret)
		return 0;

	return 1;
}

static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
				     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}

static struct ibnl_client_cbs ib_sa_cb_table[] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.dump = ib_nl_handle_resolve_resp,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.dump = ib_nl_handle_set_timeout,
		.module = THIS_MODULE },
};

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		printk(KERN_WARNING "Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
		return;
	}

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		printk(KERN_ERR "Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		printk(KERN_WARNING "Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		queue_work(ib_wq,
			   &sa_dev->port[event->element.port_num -
					 sa_dev->start_port].update_task);
	}
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine. Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int force_grh;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	force_grh = rdma_cap_eth_ah(device, port_num);

	if (rec->hop_limit > 1 || force_grh) {
		struct net_device *ndev = ib_get_ndev_from_path(rec);

		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid(device, &rec->sgid, ndev, &port_num,
					 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
		if (ndev)
			dev_put(ndev);
	}
	if (force_grh) {
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
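/*
 * Descriptive note on the send path below: send_mad() first allocates a
 * query ID from query_idr; then, when local service resolution is enabled
 * and a netlink listener is present on RDMA_NL_GROUP_LS, it hands the query
 * to ib_nl_make_request(). Otherwise, or if the netlink request fails, it
 * falls back to posting the MAD directly with ib_post_send_mad().
 */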
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		memset(rec.dmac, 0, ETH_ALEN);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
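/*
 * Illustrative (hypothetical) caller sketch for ib_sa_path_rec_get(); it is
 * not taken from any in-tree consumer, and names such as my_path_callback
 * and my_ctx are placeholders:
 *
 *	ib_sa_register_client(&my_sa_client);
 *	memset(&rec, 0, sizeof(rec));
 *	rec.sgid = local_gid;
 *	rec.dgid = remote_gid;
 *	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID,
 *				1000, GFP_KERNEL, my_path_callback, my_ctx,
 *				&sa_query);
 *	if (id < 0)
 *		return id;
 *	// optionally, before the callback has run:
 *	ib_sa_cancel_query(id, sa_query);
 *	// at teardown, after all queries have completed or been canceled:
 *	ib_sa_unregister_client(&my_sa_client);
 */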
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code. Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method   = method;
	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method   = method;
	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method   = method;
	mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	struct ib_mad_send_buf *mad_buf;

	mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
	query = mad_buf->context[0];

	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */
	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

static int __init ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		printk(KERN_ERR "Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		printk(KERN_ERR "Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = create_singlethread_workqueue("ib_nl_sa_wq");
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
			    ib_sa_cb_table)) {
		pr_err("Failed to add netlink callback\n");
		ret = -EINVAL;
		goto err4;
	}
	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err4:
	destroy_workqueue(ib_nl_wq);
err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

static void __exit ib_sa_cleanup(void)
{
	ibnl_remove_client(RDMA_NL_LS);
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);