root/drivers/infiniband/core/user_mad.c

DEFINITIONS

This source file includes the following definitions:
  1. ib_umad_dev_free
  2. ib_umad_dev_get
  3. ib_umad_dev_put
  4. hdr_size
  5. __get_agent
  6. queue_packet
  7. dequeue_send
  8. send_handler
  9. recv_handler
  10. copy_recv_mad
  11. copy_send_mad
  12. ib_umad_read
  13. copy_rmpp_mad
  14. same_destination
  15. is_duplicate
  16. ib_umad_write
  17. ib_umad_poll
  18. ib_umad_reg_agent
  19. ib_umad_reg_agent2
  20. ib_umad_unreg_agent
  21. ib_umad_enable_pkey
  22. ib_umad_ioctl
  23. ib_umad_compat_ioctl
  24. ib_umad_open
  25. ib_umad_close
  26. ib_umad_sm_open
  27. ib_umad_sm_close
  28. get_port
  29. ib_umad_get_nl_info
  30. ib_issm_get_nl_info
  31. ibdev_show
  32. port_show
  33. umad_devnode
  34. abi_version_show
  35. ib_umad_release_port
  36. ib_umad_init_port_dev
  37. ib_umad_init_port
  38. ib_umad_kill_port
  39. ib_umad_add_one
  40. ib_umad_remove_one
  41. ib_umad_init
  42. ib_umad_cleanup

/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2008 Cisco. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) "user_mad: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
        IB_UMAD_MAX_PORTS  = RDMA_MAX_PORTS,
        IB_UMAD_MAX_AGENTS = 32,

        IB_UMAD_MAJOR      = 231,
        IB_UMAD_MINOR_BASE = 0,
        IB_UMAD_NUM_FIXED_MINOR = 64,
        IB_UMAD_NUM_DYNAMIC_MINOR = IB_UMAD_MAX_PORTS - IB_UMAD_NUM_FIXED_MINOR,
        IB_ISSM_MINOR_BASE        = IB_UMAD_NUM_FIXED_MINOR,
};

/*
 * Our lifetime rules for these structs are the following: when a
 * device special file is opened, we take a reference on the
 * ib_umad_port's struct ib_umad_device.  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we drop the module's reference.
 */

struct ib_umad_port {
        struct cdev           cdev;
        struct device         dev;
        struct cdev           sm_cdev;
        struct device         sm_dev;
        struct semaphore       sm_sem;

        struct mutex           file_mutex;
        struct list_head       file_list;

        struct ib_device      *ib_dev;
        struct ib_umad_device *umad_dev;
        int                    dev_num;
        u8                     port_num;
};

struct ib_umad_device {
        struct kref kref;
        struct ib_umad_port ports[];
};

struct ib_umad_file {
        struct mutex            mutex;
        struct ib_umad_port    *port;
        struct list_head        recv_list;
        struct list_head        send_list;
        struct list_head        port_list;
        spinlock_t              send_lock;
        wait_queue_head_t       recv_wait;
        struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
        int                     agents_dead;
        u8                      use_pkey_index;
        u8                      already_used;
};

struct ib_umad_packet {
        struct ib_mad_send_buf *msg;
        struct ib_mad_recv_wc  *recv_wc;
        struct list_head   list;
        int                length;
        struct ib_user_mad mad;
};

#define CREATE_TRACE_POINTS
#include <trace/events/ib_umad.h>

static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
                                   IB_UMAD_NUM_FIXED_MINOR;
static dev_t dynamic_umad_dev;
static dev_t dynamic_issm_dev;

static DEFINE_IDA(umad_ida);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);

static void ib_umad_dev_free(struct kref *kref)
{
        struct ib_umad_device *dev =
                container_of(kref, struct ib_umad_device, kref);

        kfree(dev);
}

static void ib_umad_dev_get(struct ib_umad_device *dev)
{
        kref_get(&dev->kref);
}

static void ib_umad_dev_put(struct ib_umad_device *dev)
{
        kref_put(&dev->kref, ib_umad_dev_free);
}

static int hdr_size(struct ib_umad_file *file)
{
        return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
                sizeof (struct ib_user_mad_hdr_old);
}

/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
        return file->agents_dead ? NULL : file->agent[id];
}

static int queue_packet(struct ib_umad_file *file,
                        struct ib_mad_agent *agent,
                        struct ib_umad_packet *packet)
{
        int ret = 1;

        mutex_lock(&file->mutex);

        for (packet->mad.hdr.id = 0;
             packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
             packet->mad.hdr.id++)
                if (agent == __get_agent(file, packet->mad.hdr.id)) {
                        list_add_tail(&packet->list, &file->recv_list);
                        wake_up_interruptible(&file->recv_wait);
                        ret = 0;
                        break;
                }

        mutex_unlock(&file->mutex);

        return ret;
}

static void dequeue_send(struct ib_umad_file *file,
                         struct ib_umad_packet *packet)
{
        spin_lock_irq(&file->send_lock);
        list_del(&packet->list);
        spin_unlock_irq(&file->send_lock);
}

static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *send_wc)
{
        struct ib_umad_file *file = agent->context;
        struct ib_umad_packet *packet = send_wc->send_buf->context[0];

        dequeue_send(file, packet);
        rdma_destroy_ah(packet->msg->ah, RDMA_DESTROY_AH_SLEEPABLE);
        ib_free_send_mad(packet->msg);

        if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
                packet->length = IB_MGMT_MAD_HDR;
                packet->mad.hdr.status = ETIMEDOUT;
                if (!queue_packet(file, agent, packet))
                        return;
        }
        kfree(packet);
}

static void recv_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_buf *send_buf,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_umad_file *file = agent->context;
        struct ib_umad_packet *packet;

        if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
                goto err1;

        packet = kzalloc(sizeof *packet, GFP_KERNEL);
        if (!packet)
                goto err1;

        packet->length = mad_recv_wc->mad_len;
        packet->recv_wc = mad_recv_wc;

        packet->mad.hdr.status     = 0;
        packet->mad.hdr.length     = hdr_size(file) + mad_recv_wc->mad_len;
        packet->mad.hdr.qpn        = cpu_to_be32(mad_recv_wc->wc->src_qp);
        /*
         * On OPA devices it is okay to lose the upper 16 bits of LID as this
         * information is obtained elsewhere. Mask off the upper 16 bits.
         */
        if (rdma_cap_opa_mad(agent->device, agent->port_num))
                packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
                                                  mad_recv_wc->wc->slid);
        else
                packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
        packet->mad.hdr.sl         = mad_recv_wc->wc->sl;
        packet->mad.hdr.path_bits  = mad_recv_wc->wc->dlid_path_bits;
        packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
        packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
        if (packet->mad.hdr.grh_present) {
                struct rdma_ah_attr ah_attr;
                const struct ib_global_route *grh;
                int ret;

                ret = ib_init_ah_attr_from_wc(agent->device, agent->port_num,
                                              mad_recv_wc->wc,
                                              mad_recv_wc->recv_buf.grh,
                                              &ah_attr);
                if (ret)
                        goto err2;

                grh = rdma_ah_read_grh(&ah_attr);
                packet->mad.hdr.gid_index = grh->sgid_index;
                packet->mad.hdr.hop_limit = grh->hop_limit;
                packet->mad.hdr.traffic_class = grh->traffic_class;
                memcpy(packet->mad.hdr.gid, &grh->dgid, 16);
                packet->mad.hdr.flow_label = cpu_to_be32(grh->flow_label);
                rdma_destroy_ah_attr(&ah_attr);
        }

        if (queue_packet(file, agent, packet))
                goto err2;
        return;

err2:
        kfree(packet);
err1:
        ib_free_recv_mad(mad_recv_wc);
}

static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
                             struct ib_umad_packet *packet, size_t count)
{
        struct ib_mad_recv_buf *recv_buf;
        int left, seg_payload, offset, max_seg_payload;
        size_t seg_size;

        recv_buf = &packet->recv_wc->recv_buf;
        seg_size = packet->recv_wc->mad_seg_size;

        /* We need enough room to copy the first (or only) MAD segment. */
        if ((packet->length <= seg_size &&
             count < hdr_size(file) + packet->length) ||
            (packet->length > seg_size &&
             count < hdr_size(file) + seg_size))
                return -EINVAL;

        if (copy_to_user(buf, &packet->mad, hdr_size(file)))
                return -EFAULT;

        buf += hdr_size(file);
        seg_payload = min_t(int, packet->length, seg_size);
        if (copy_to_user(buf, recv_buf->mad, seg_payload))
                return -EFAULT;

        if (seg_payload < packet->length) {
                /*
                 * Multipacket RMPP MAD message. Copy remainder of message.
                 * Note that the last segment may have a shorter payload.
                 */
                if (count < hdr_size(file) + packet->length) {
                        /*
                         * The buffer is too small; return the first RMPP
                         * segment, which includes the RMPP message length.
                         */
                        return -ENOSPC;
                }
                offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
                max_seg_payload = seg_size - offset;

                for (left = packet->length - seg_payload, buf += seg_payload;
                     left; left -= seg_payload, buf += seg_payload) {
                        recv_buf = container_of(recv_buf->list.next,
                                                struct ib_mad_recv_buf, list);
                        seg_payload = min(left, max_seg_payload);
                        if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
                                         seg_payload))
                                return -EFAULT;
                }
        }

        trace_ib_umad_read_recv(file, &packet->mad.hdr, &recv_buf->mad->mad_hdr);

        return hdr_size(file) + packet->length;
}

static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
                             struct ib_umad_packet *packet, size_t count)
{
        ssize_t size = hdr_size(file) + packet->length;

        if (count < size)
                return -EINVAL;

        if (copy_to_user(buf, &packet->mad, hdr_size(file)))
                return -EFAULT;

        buf += hdr_size(file);

        if (copy_to_user(buf, packet->mad.data, packet->length))
                return -EFAULT;

        trace_ib_umad_read_send(file, &packet->mad.hdr,
                                (struct ib_mad_hdr *)&packet->mad.data);

        return size;
}

static ssize_t ib_umad_read(struct file *filp, char __user *buf,
                            size_t count, loff_t *pos)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_umad_packet *packet;
        ssize_t ret;

        if (count < hdr_size(file))
                return -EINVAL;

        mutex_lock(&file->mutex);

        while (list_empty(&file->recv_list)) {
                mutex_unlock(&file->mutex);

                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->recv_wait,
                                             !list_empty(&file->recv_list)))
                        return -ERESTARTSYS;

                mutex_lock(&file->mutex);
        }

        packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
        list_del(&packet->list);

        mutex_unlock(&file->mutex);

        if (packet->recv_wc)
                ret = copy_recv_mad(file, buf, packet, count);
        else
                ret = copy_send_mad(file, buf, packet, count);

        if (ret < 0) {
                /* Requeue packet */
                mutex_lock(&file->mutex);
                list_add(&packet->list, &file->recv_list);
                mutex_unlock(&file->mutex);
        } else {
                if (packet->recv_wc)
                        ib_free_recv_mad(packet->recv_wc);
                kfree(packet);
        }
        return ret;
}
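
/*
 * A minimal userspace sketch of this read path, assuming the uapi header
 * <rdma/ib_user_mad.h>, an fd opened on /dev/infiniband/umadN, and IB-sized
 * (256 byte) MAD segments.  A multipacket RMPP reply that does not fit makes
 * read(2) fail with ENOSPC after filling in the user_mad header, whose length
 * field gives the size needed for the retry; the kernel requeues the packet
 * in that case, so re-reading returns the same MAD.  Illustrative only:
 *
 *      #include <errno.h>
 *      #include <stdlib.h>
 *      #include <unistd.h>
 *      #include <rdma/ib_user_mad.h>
 *
 *      static struct ib_user_mad *read_mad(int fd, ssize_t *lenp)
 *      {
 *              size_t len = sizeof(struct ib_user_mad) + 256;
 *              struct ib_user_mad *umad = malloc(len), *bigger;
 *              ssize_t n = -1;
 *
 *              while (umad && (n = read(fd, umad, len)) < 0 &&
 *                     errno == ENOSPC) {
 *                      len = umad->hdr.length; // full RMPP message length
 *                      bigger = realloc(umad, len);
 *                      if (!bigger)
 *                              break;
 *                      umad = bigger;
 *              }
 *              if (!umad || n < 0) {
 *                      free(umad);
 *                      return NULL;
 *              }
 *              *lenp = n;
 *              return umad;
 *      }
 */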

static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
        int left, seg;

        /* Copy class specific header */
        if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
            copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
                           msg->hdr_len - IB_MGMT_RMPP_HDR))
                return -EFAULT;

        /* All headers are in place.  Copy data segments. */
        for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
             seg++, left -= msg->seg_size, buf += msg->seg_size) {
                if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
                                   min(left, msg->seg_size)))
                        return -EFAULT;
        }
        return 0;
}

static int same_destination(struct ib_user_mad_hdr *hdr1,
                            struct ib_user_mad_hdr *hdr2)
{
        if (!hdr1->grh_present && !hdr2->grh_present)
           return (hdr1->lid == hdr2->lid);

        if (hdr1->grh_present && hdr2->grh_present)
           return !memcmp(hdr1->gid, hdr2->gid, 16);

        return 0;
}

static int is_duplicate(struct ib_umad_file *file,
                        struct ib_umad_packet *packet)
{
        struct ib_umad_packet *sent_packet;
        struct ib_mad_hdr *sent_hdr, *hdr;

        hdr = (struct ib_mad_hdr *) packet->mad.data;
        list_for_each_entry(sent_packet, &file->send_list, list) {
                sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;

                if ((hdr->tid != sent_hdr->tid) ||
                    (hdr->mgmt_class != sent_hdr->mgmt_class))
                        continue;

                /*
                 * No need to be overly clever here.  If two new operations have
                 * the same TID, reject the second as a duplicate.  This is more
                 * restrictive than required by the spec.
                 */
                if (!ib_response_mad(hdr)) {
                        if (!ib_response_mad(sent_hdr))
                                return 1;
                        continue;
                } else if (!ib_response_mad(sent_hdr))
                        continue;

                if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
                        return 1;
        }

        return 0;
}

static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_umad_packet *packet;
        struct ib_mad_agent *agent;
        struct rdma_ah_attr ah_attr;
        struct ib_ah *ah;
        struct ib_rmpp_mad *rmpp_mad;
        __be64 *tid;
        int ret, data_len, hdr_len, copy_offset, rmpp_active;
        u8 base_version;

        if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
                return -EINVAL;

        packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
        if (!packet)
                return -ENOMEM;

        if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
                ret = -EFAULT;
                goto err;
        }

        if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
                ret = -EINVAL;
                goto err;
        }

        buf += hdr_size(file);

        if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
                ret = -EFAULT;
                goto err;
        }

        mutex_lock(&file->mutex);

        trace_ib_umad_write(file, &packet->mad.hdr,
                            (struct ib_mad_hdr *)&packet->mad.data);

        agent = __get_agent(file, packet->mad.hdr.id);
        if (!agent) {
                ret = -EINVAL;
                goto err_up;
        }

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.type = rdma_ah_find_type(agent->device,
                                         file->port->port_num);
        rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
        rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
        rdma_ah_set_path_bits(&ah_attr, packet->mad.hdr.path_bits);
        rdma_ah_set_port_num(&ah_attr, file->port->port_num);
        if (packet->mad.hdr.grh_present) {
                rdma_ah_set_grh(&ah_attr, NULL,
                                be32_to_cpu(packet->mad.hdr.flow_label),
                                packet->mad.hdr.gid_index,
                                packet->mad.hdr.hop_limit,
                                packet->mad.hdr.traffic_class);
                rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid);
        }

        ah = rdma_create_user_ah(agent->qp->pd, &ah_attr, NULL);
        if (IS_ERR(ah)) {
                ret = PTR_ERR(ah);
                goto err_up;
        }

        rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
        hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);

        if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
            && ib_mad_kernel_rmpp_agent(agent)) {
                copy_offset = IB_MGMT_RMPP_HDR;
                rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
                                                IB_MGMT_RMPP_FLAG_ACTIVE;
        } else {
                copy_offset = IB_MGMT_MAD_HDR;
                rmpp_active = 0;
        }

        base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version;
        data_len = count - hdr_size(file) - hdr_len;
        packet->msg = ib_create_send_mad(agent,
                                         be32_to_cpu(packet->mad.hdr.qpn),
                                         packet->mad.hdr.pkey_index, rmpp_active,
                                         hdr_len, data_len, GFP_KERNEL,
                                         base_version);
        if (IS_ERR(packet->msg)) {
                ret = PTR_ERR(packet->msg);
                goto err_ah;
        }

        packet->msg->ah         = ah;
        packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
        packet->msg->retries    = packet->mad.hdr.retries;
        packet->msg->context[0] = packet;

        /* Copy MAD header.  Any RMPP header is already in place. */
        memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

        if (!rmpp_active) {
                if (copy_from_user(packet->msg->mad + copy_offset,
                                   buf + copy_offset,
                                   hdr_len + data_len - copy_offset)) {
                        ret = -EFAULT;
                        goto err_msg;
                }
        } else {
                ret = copy_rmpp_mad(packet->msg, buf);
                if (ret)
                        goto err_msg;
        }

        /*
         * Set the high-order part of the transaction ID to make MADs from
         * different agents unique, and allow routing responses back to the
         * original requestor.
         */
        if (!ib_response_mad(packet->msg->mad)) {
                tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
                *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
                                   (be64_to_cpup(tid) & 0xffffffff));
                rmpp_mad->mad_hdr.tid = *tid;
        }

        if (!ib_mad_kernel_rmpp_agent(agent)
           && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
           && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
                spin_lock_irq(&file->send_lock);
                list_add_tail(&packet->list, &file->send_list);
                spin_unlock_irq(&file->send_lock);
        } else {
                spin_lock_irq(&file->send_lock);
                ret = is_duplicate(file, packet);
                if (!ret)
                        list_add_tail(&packet->list, &file->send_list);
                spin_unlock_irq(&file->send_lock);
                if (ret) {
                        ret = -EINVAL;
                        goto err_msg;
                }
        }

        ret = ib_post_send_mad(packet->msg, NULL);
        if (ret)
                goto err_send;

        mutex_unlock(&file->mutex);
        return count;

err_send:
        dequeue_send(file, packet);
err_msg:
        ib_free_send_mad(packet->msg);
err_ah:
        rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
err_up:
        mutex_unlock(&file->mutex);
err:
        kfree(packet);
        return ret;
}
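
/*
 * A minimal userspace sketch of this send path, assuming <rdma/ib_user_mad.h>,
 * an agent id from one of the registration ioctls below, and that the file
 * uses the P_Key index ABI (see ib_umad_enable_pkey) so the layout of
 * struct ib_user_mad matches what the kernel parses.  The destination LID
 * and the PerfMgt class/method values are placeholders:
 *
 *      #include <endian.h>
 *      #include <stdint.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <rdma/ib_user_mad.h>
 *
 *      static int send_mad(int fd, uint32_t agent_id, uint16_t dlid)
 *      {
 *              uint64_t buf[(sizeof(struct ib_user_mad) + 256) / 8] = { 0 };
 *              struct ib_user_mad *umad = (struct ib_user_mad *)buf;
 *              uint8_t *mad = (uint8_t *)umad->data;
 *
 *              umad->hdr.id         = agent_id;
 *              umad->hdr.qpn        = htobe32(1);      // GSI
 *              umad->hdr.lid        = htobe16(dlid);
 *              umad->hdr.timeout_ms = 100;             // solicit a response
 *              umad->hdr.retries    = 3;
 *              mad[0] = 1;             // base_version
 *              mad[1] = 0x04;          // mgmt_class (PerfMgt, for example)
 *              mad[2] = 1;             // class_version
 *              mad[3] = 0x01;          // method: Get
 *              // low 32 TID bits are the caller's; the kernel fills the high bits
 *              return write(fd, buf, sizeof(buf)) ==
 *                     (ssize_t)sizeof(buf) ? 0 : -1;
 *      }
 */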

static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct ib_umad_file *file = filp->private_data;

        /* we will always be able to post a MAD send */
        __poll_t mask = EPOLLOUT | EPOLLWRNORM;

        poll_wait(filp, &file->recv_wait, wait);

        if (!list_empty(&file->recv_list))
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}
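
/*
 * Sketch: because EPOLLOUT is always asserted, a userspace event loop should
 * poll only for readability; fd, umad, len and timeout_ms are assumed from
 * the surrounding code.
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *      if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *              n = read(fd, umad, len);        // a MAD is queued; won't block
 */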

static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
                             int compat_method_mask)
{
        struct ib_user_mad_reg_req ureq;
        struct ib_mad_reg_req req;
        struct ib_mad_agent *agent = NULL;
        int agent_id;
        int ret;

        mutex_lock(&file->port->file_mutex);
        mutex_lock(&file->mutex);

        if (!file->port->ib_dev) {
                dev_notice(&file->port->dev,
                           "ib_umad_reg_agent: invalid device\n");
                ret = -EPIPE;
                goto out;
        }

        if (copy_from_user(&ureq, arg, sizeof ureq)) {
                ret = -EFAULT;
                goto out;
        }

        if (ureq.qpn != 0 && ureq.qpn != 1) {
                dev_notice(&file->port->dev,
                           "ib_umad_reg_agent: invalid QPN %d specified\n",
                           ureq.qpn);
                ret = -EINVAL;
                goto out;
        }

        for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
                if (!__get_agent(file, agent_id))
                        goto found;

        dev_notice(&file->port->dev,
                   "ib_umad_reg_agent: Max Agents (%u) reached\n",
                   IB_UMAD_MAX_AGENTS);
        ret = -ENOMEM;
        goto out;

found:
        if (ureq.mgmt_class) {
                memset(&req, 0, sizeof(req));
                req.mgmt_class         = ureq.mgmt_class;
                req.mgmt_class_version = ureq.mgmt_class_version;
                memcpy(req.oui, ureq.oui, sizeof req.oui);

                if (compat_method_mask) {
                        u32 *umm = (u32 *) ureq.method_mask;
                        int i;

                        for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
                                req.method_mask[i] =
                                        umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
                } else
                        memcpy(req.method_mask, ureq.method_mask,
                               sizeof req.method_mask);
        }

        agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
                                      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
                                      ureq.mgmt_class ? &req : NULL,
                                      ureq.rmpp_version,
                                      send_handler, recv_handler, file, 0);
        if (IS_ERR(agent)) {
                ret = PTR_ERR(agent);
                agent = NULL;
                goto out;
        }

        if (put_user(agent_id,
                     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
                ret = -EFAULT;
                goto out;
        }

        if (!file->already_used) {
                file->already_used = 1;
                if (!file->use_pkey_index) {
                        dev_warn(&file->port->dev,
                                "process %s did not enable P_Key index support.\n",
                                current->comm);
                        dev_warn(&file->port->dev,
                                "   Documentation/infiniband/user_mad.rst has info on the new ABI.\n");
                }
        }

        file->agent[agent_id] = agent;
        ret = 0;

out:
        mutex_unlock(&file->mutex);

        if (ret && agent)
                ib_unregister_mad_agent(agent);

        mutex_unlock(&file->port->file_mutex);

        return ret;
}
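
/*
 * A minimal userspace sketch of the registration ioctl, assuming
 * <rdma/ib_user_mad.h> and <sys/ioctl.h>; the management class and method
 * values are placeholders.  On success the kernel writes the agent id back
 * into req.id, which is then used as hdr.id in outgoing MADs:
 *
 *      struct ib_user_mad_reg_req req;
 *
 *      memset(&req, 0, sizeof(req));
 *      req.qpn                = 1;             // GSI (0 would mean SMI)
 *      req.mgmt_class         = 0x04;          // PerfMgt, for example
 *      req.mgmt_class_version = 1;
 *      req.method_mask[0]     = 1 << 0x01;     // receive unsolicited Get
 *      if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req) == 0)
 *              agent_id = req.id;
 */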

static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
{
        struct ib_user_mad_reg_req2 ureq;
        struct ib_mad_reg_req req;
        struct ib_mad_agent *agent = NULL;
        int agent_id;
        int ret;

        mutex_lock(&file->port->file_mutex);
        mutex_lock(&file->mutex);

        if (!file->port->ib_dev) {
                dev_notice(&file->port->dev,
                           "ib_umad_reg_agent2: invalid device\n");
                ret = -EPIPE;
                goto out;
        }

        if (copy_from_user(&ureq, arg, sizeof(ureq))) {
                ret = -EFAULT;
                goto out;
        }

        if (ureq.qpn != 0 && ureq.qpn != 1) {
                dev_notice(&file->port->dev,
                           "ib_umad_reg_agent2: invalid QPN %d specified\n",
                           ureq.qpn);
                ret = -EINVAL;
                goto out;
        }

        if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
                dev_notice(&file->port->dev,
                           "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
                           ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
                ret = -EINVAL;

                if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP,
                                (u32 __user *) (arg + offsetof(struct
                                ib_user_mad_reg_req2, flags))))
                        ret = -EFAULT;

                goto out;
        }

        for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
                if (!__get_agent(file, agent_id))
                        goto found;

        dev_notice(&file->port->dev,
                   "ib_umad_reg_agent2: Max Agents (%u) reached\n",
                   IB_UMAD_MAX_AGENTS);
        ret = -ENOMEM;
        goto out;

found:
        if (ureq.mgmt_class) {
                memset(&req, 0, sizeof(req));
                req.mgmt_class         = ureq.mgmt_class;
                req.mgmt_class_version = ureq.mgmt_class_version;
                if (ureq.oui & 0xff000000) {
                        dev_notice(&file->port->dev,
                                   "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
                                   ureq.oui);
                        ret = -EINVAL;
                        goto out;
                }
                req.oui[2] =  ureq.oui & 0x0000ff;
                req.oui[1] = (ureq.oui & 0x00ff00) >> 8;
                req.oui[0] = (ureq.oui & 0xff0000) >> 16;
                memcpy(req.method_mask, ureq.method_mask,
                        sizeof(req.method_mask));
        }

        agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
                                      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
                                      ureq.mgmt_class ? &req : NULL,
                                      ureq.rmpp_version,
                                      send_handler, recv_handler, file,
                                      ureq.flags);
        if (IS_ERR(agent)) {
                ret = PTR_ERR(agent);
                agent = NULL;
                goto out;
        }

        if (put_user(agent_id,
                     (u32 __user *)(arg +
                                offsetof(struct ib_user_mad_reg_req2, id)))) {
                ret = -EFAULT;
                goto out;
        }

        if (!file->already_used) {
                file->already_used = 1;
                file->use_pkey_index = 1;
        }

        file->agent[agent_id] = agent;
        ret = 0;

out:
        mutex_unlock(&file->mutex);

        if (ret && agent)
                ib_unregister_mad_agent(agent);

        mutex_unlock(&file->port->file_mutex);

        return ret;
}
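
/*
 * The same registration through REGISTER_AGENT2, a sketch assuming
 * <rdma/ib_user_mad.h> and <errno.h>.  This variant carries a capability
 * flags word (IB_USER_MAD_REG_FLAGS_CAP) and implicitly switches the file
 * to the P_Key index ABI; on an unsupported flag the kernel writes its
 * supported mask back into flags before failing with EINVAL:
 *
 *      struct ib_user_mad_reg_req2 req2;
 *
 *      memset(&req2, 0, sizeof(req2));
 *      req2.qpn                = 1;
 *      req2.mgmt_class         = 0x04;         // placeholder class
 *      req2.mgmt_class_version = 1;
 *      req2.flags              = IB_USER_MAD_USER_RMPP;
 *      if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT2, &req2) == 0)
 *              agent_id = req2.id;
 *      else if (errno == EINVAL)
 *              supported = req2.flags;         // capability mask from kernel
 */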


static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
        struct ib_mad_agent *agent = NULL;
        u32 id;
        int ret = 0;

        if (get_user(id, arg))
                return -EFAULT;
        if (id >= IB_UMAD_MAX_AGENTS)
                return -EINVAL;

        mutex_lock(&file->port->file_mutex);
        mutex_lock(&file->mutex);

        id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
        if (!__get_agent(file, id)) {
                ret = -EINVAL;
                goto out;
        }

        agent = file->agent[id];
        file->agent[id] = NULL;

out:
        mutex_unlock(&file->mutex);

        if (agent)
                ib_unregister_mad_agent(agent);

        mutex_unlock(&file->port->file_mutex);

        return ret;
}

static long ib_umad_enable_pkey(struct ib_umad_file *file)
{
        int ret = 0;

        mutex_lock(&file->mutex);
        if (file->already_used)
                ret = -EINVAL;
        else
                file->use_pkey_index = 1;
        mutex_unlock(&file->mutex);

        return ret;
}
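
/*
 * Sketch: switching a freshly opened fd to the P_Key-aware ABI.  This must
 * happen before the first agent registration; afterwards the ioctl fails
 * with EINVAL and reads/writes keep using struct ib_user_mad_hdr_old.
 *
 *      int fd = open("/dev/infiniband/umad0", O_RDWR);
 *      int new_abi = fd >= 0 && ioctl(fd, IB_USER_MAD_ENABLE_PKEY) == 0;
 *
 *      // with new_abi set, struct ib_user_mad_hdr (which carries
 *      // pkey_index) frames every read(2) and write(2) on this fd
 */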

static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
                          unsigned long arg)
{
        switch (cmd) {
        case IB_USER_MAD_REGISTER_AGENT:
                return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
        case IB_USER_MAD_UNREGISTER_AGENT:
                return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
        case IB_USER_MAD_ENABLE_PKEY:
                return ib_umad_enable_pkey(filp->private_data);
        case IB_USER_MAD_REGISTER_AGENT2:
                return ib_umad_reg_agent2(filp->private_data, (void __user *) arg);
        default:
                return -ENOIOCTLCMD;
        }
}

#ifdef CONFIG_COMPAT
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
                                 unsigned long arg)
{
        switch (cmd) {
        case IB_USER_MAD_REGISTER_AGENT:
                return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
        case IB_USER_MAD_UNREGISTER_AGENT:
                return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
        case IB_USER_MAD_ENABLE_PKEY:
                return ib_umad_enable_pkey(filp->private_data);
        case IB_USER_MAD_REGISTER_AGENT2:
                return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg));
        default:
                return -ENOIOCTLCMD;
        }
}
#endif

/*
 * ib_umad_open() does not need the BKL:
 *
 *  - the ib_umad_port structures are properly reference counted, and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - the ioctl method does not affect any global state outside of the
 *    file structure being operated on;
 */
static int ib_umad_open(struct inode *inode, struct file *filp)
{
        struct ib_umad_port *port;
        struct ib_umad_file *file;
        int ret = 0;

        port = container_of(inode->i_cdev, struct ib_umad_port, cdev);

        mutex_lock(&port->file_mutex);

        if (!port->ib_dev) {
                ret = -ENXIO;
                goto out;
        }

        if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
                ret = -EPERM;
                goto out;
        }

        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file) {
                ret = -ENOMEM;
                goto out;
        }

        mutex_init(&file->mutex);
        spin_lock_init(&file->send_lock);
        INIT_LIST_HEAD(&file->recv_list);
        INIT_LIST_HEAD(&file->send_list);
        init_waitqueue_head(&file->recv_wait);

        file->port = port;
        filp->private_data = file;

        list_add_tail(&file->port_list, &port->file_list);

        stream_open(inode, filp);
out:
        mutex_unlock(&port->file_mutex);
        return ret;
}

static int ib_umad_close(struct inode *inode, struct file *filp)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_umad_packet *packet, *tmp;
        int already_dead;
        int i;

        mutex_lock(&file->port->file_mutex);
        mutex_lock(&file->mutex);

        already_dead = file->agents_dead;
        file->agents_dead = 1;

        list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
                if (packet->recv_wc)
                        ib_free_recv_mad(packet->recv_wc);
                kfree(packet);
        }

        list_del(&file->port_list);

        mutex_unlock(&file->mutex);

        if (!already_dead)
                for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
                        if (file->agent[i])
                                ib_unregister_mad_agent(file->agent[i]);

        mutex_unlock(&file->port->file_mutex);
        mutex_destroy(&file->mutex);
        kfree(file);
        return 0;
}

static const struct file_operations umad_fops = {
        .owner          = THIS_MODULE,
        .read           = ib_umad_read,
        .write          = ib_umad_write,
        .poll           = ib_umad_poll,
        .unlocked_ioctl = ib_umad_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ib_umad_compat_ioctl,
#endif
        .open           = ib_umad_open,
        .release        = ib_umad_close,
        .llseek         = no_llseek,
};

static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
        struct ib_umad_port *port;
        struct ib_port_modify props = {
                .set_port_cap_mask = IB_PORT_SM
        };
        int ret;

        port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);

        if (filp->f_flags & O_NONBLOCK) {
                if (down_trylock(&port->sm_sem)) {
                        ret = -EAGAIN;
                        goto fail;
                }
        } else {
                if (down_interruptible(&port->sm_sem)) {
                        ret = -ERESTARTSYS;
                        goto fail;
                }
        }

        if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
                ret = -EPERM;
                goto err_up_sem;
        }

        ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
        if (ret)
                goto err_up_sem;

        filp->private_data = port;

        nonseekable_open(inode, filp);
        return 0;

err_up_sem:
        up(&port->sm_sem);

fail:
        return ret;
}

static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
        struct ib_umad_port *port = filp->private_data;
        struct ib_port_modify props = {
                .clr_port_cap_mask = IB_PORT_SM
        };
        int ret = 0;

        mutex_lock(&port->file_mutex);
        if (port->ib_dev)
                ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
        mutex_unlock(&port->file_mutex);

        up(&port->sm_sem);

        return ret;
}

static const struct file_operations umad_sm_fops = {
        .owner   = THIS_MODULE,
        .open    = ib_umad_sm_open,
        .release = ib_umad_sm_close,
        .llseek  = no_llseek,
};
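
/*
 * Sketch: how userspace claims the SM port capability through this device.
 * The sm_sem semaphore admits one opener per port; the capability bit stays
 * set for as long as the fd is held open.
 *
 *      int fd = open("/dev/infiniband/issm0", O_RDWR); // sets IB_PORT_SM
 *
 *      if (fd >= 0) {
 *              run_sm();       // hypothetical SM main loop
 *              close(fd);      // clears IB_PORT_SM again
 *      }
 */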

static struct ib_umad_port *get_port(struct ib_device *ibdev,
                                     struct ib_umad_device *umad_dev,
                                     unsigned int port)
{
        if (!umad_dev)
                return ERR_PTR(-EOPNOTSUPP);
        if (!rdma_is_port_valid(ibdev, port))
                return ERR_PTR(-EINVAL);
        if (!rdma_cap_ib_mad(ibdev, port))
                return ERR_PTR(-EOPNOTSUPP);

        return &umad_dev->ports[port - rdma_start_port(ibdev)];
}

static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data,
                               struct ib_client_nl_info *res)
{
        struct ib_umad_port *port = get_port(ibdev, client_data, res->port);

        if (IS_ERR(port))
                return PTR_ERR(port);

        res->abi = IB_USER_MAD_ABI_VERSION;
        res->cdev = &port->dev;
        return 0;
}

static struct ib_client umad_client = {
        .name   = "umad",
        .add    = ib_umad_add_one,
        .remove = ib_umad_remove_one,
        .get_nl_info = ib_umad_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("umad");

static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data,
                               struct ib_client_nl_info *res)
{
        struct ib_umad_port *port = get_port(ibdev, client_data, res->port);

        if (IS_ERR(port))
                return PTR_ERR(port);

        res->abi = IB_USER_MAD_ABI_VERSION;
        res->cdev = &port->sm_dev;
        return 0;
}

static struct ib_client issm_client = {
        .name = "issm",
        .get_nl_info = ib_issm_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("issm");

static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct ib_umad_port *port = dev_get_drvdata(dev);

        if (!port)
                return -ENODEV;

        return sprintf(buf, "%s\n", dev_name(&port->ib_dev->dev));
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t port_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct ib_umad_port *port = dev_get_drvdata(dev);

        if (!port)
                return -ENODEV;

        return sprintf(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR_RO(port);

static struct attribute *umad_class_dev_attrs[] = {
        &dev_attr_ibdev.attr,
        &dev_attr_port.attr,
        NULL,
};
ATTRIBUTE_GROUPS(umad_class_dev);

static char *umad_devnode(struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
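
/*
 * With this devnode callback the nodes land in a subdirectory: for example,
 * dev_name "umad0" surfaces as /dev/infiniband/umad0 and "issm0" as
 * /dev/infiniband/issm0.
 */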

static ssize_t abi_version_show(struct class *class,
                                struct class_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR_RO(abi_version);

static struct attribute *umad_class_attrs[] = {
        &class_attr_abi_version.attr,
        NULL,
};
ATTRIBUTE_GROUPS(umad_class);

static struct class umad_class = {
        .name           = "infiniband_mad",
        .devnode        = umad_devnode,
        .class_groups   = umad_class_groups,
        .dev_groups     = umad_class_dev_groups,
};

static void ib_umad_release_port(struct device *device)
{
        struct ib_umad_port *port = dev_get_drvdata(device);
        struct ib_umad_device *umad_dev = port->umad_dev;

        ib_umad_dev_put(umad_dev);
}

static void ib_umad_init_port_dev(struct device *dev,
                                  struct ib_umad_port *port,
                                  const struct ib_device *device)
{
        device_initialize(dev);
        ib_umad_dev_get(port->umad_dev);
        dev->class = &umad_class;
        dev->parent = device->dev.parent;
        dev_set_drvdata(dev, port);
        dev->release = ib_umad_release_port;
}

static int ib_umad_init_port(struct ib_device *device, int port_num,
                             struct ib_umad_device *umad_dev,
                             struct ib_umad_port *port)
{
        int devnum;
        dev_t base_umad;
        dev_t base_issm;
        int ret;

        devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
        if (devnum < 0)
                return -1;
        port->dev_num = devnum;
        if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
                base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
                base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
        } else {
                base_umad = devnum + base_umad_dev;
                base_issm = devnum + base_issm_dev;
        }

        port->ib_dev   = device;
        port->umad_dev = umad_dev;
        port->port_num = port_num;
        sema_init(&port->sm_sem, 1);
        mutex_init(&port->file_mutex);
        INIT_LIST_HEAD(&port->file_list);

        ib_umad_init_port_dev(&port->dev, port, device);
        port->dev.devt = base_umad;
        dev_set_name(&port->dev, "umad%d", port->dev_num);
        cdev_init(&port->cdev, &umad_fops);
        port->cdev.owner = THIS_MODULE;

        ret = cdev_device_add(&port->cdev, &port->dev);
        if (ret)
                goto err_cdev;

        ib_umad_init_port_dev(&port->sm_dev, port, device);
        port->sm_dev.devt = base_issm;
        dev_set_name(&port->sm_dev, "issm%d", port->dev_num);
        cdev_init(&port->sm_cdev, &umad_sm_fops);
        port->sm_cdev.owner = THIS_MODULE;

        ret = cdev_device_add(&port->sm_cdev, &port->sm_dev);
        if (ret)
                goto err_dev;

        return 0;

err_dev:
        put_device(&port->sm_dev);
        cdev_device_del(&port->cdev, &port->dev);
err_cdev:
        put_device(&port->dev);
        ida_free(&umad_ida, devnum);
        return ret;
}
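
/*
 * Worked example of the numbering above: devnum 3 falls in the fixed region,
 * giving umad3 = MKDEV(231, 3) and issm3 = MKDEV(231, 64 + 3); devnum 70 is
 * past IB_UMAD_NUM_FIXED_MINOR (64), so both nodes come from the dynamically
 * allocated ranges, at dynamic_umad_dev + 6 and dynamic_issm_dev + 6.
 */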

static void ib_umad_kill_port(struct ib_umad_port *port)
{
        struct ib_umad_file *file;
        int id;

        cdev_device_del(&port->sm_cdev, &port->sm_dev);
        cdev_device_del(&port->cdev, &port->dev);

        mutex_lock(&port->file_mutex);

        /*
         * Mark ib_dev NULL so that ioctls and other file ops cannot
         * progress any further.
         */
        port->ib_dev = NULL;

        list_for_each_entry(file, &port->file_list, port_list) {
                mutex_lock(&file->mutex);
                file->agents_dead = 1;
                mutex_unlock(&file->mutex);

                for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
                        if (file->agent[id])
                                ib_unregister_mad_agent(file->agent[id]);
        }

        mutex_unlock(&port->file_mutex);

        ida_free(&umad_ida, port->dev_num);

        /* balances device_initialize() */
        put_device(&port->sm_dev);
        put_device(&port->dev);
}

static void ib_umad_add_one(struct ib_device *device)
{
        struct ib_umad_device *umad_dev;
        int s, e, i;
        int count = 0;

        s = rdma_start_port(device);
        e = rdma_end_port(device);

        umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL);
        if (!umad_dev)
                return;

        kref_init(&umad_dev->kref);
        for (i = s; i <= e; ++i) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                if (ib_umad_init_port(device, i, umad_dev,
                                      &umad_dev->ports[i - s]))
                        goto err;

                count++;
        }

        if (!count)
                goto free;

        ib_set_client_data(device, &umad_client, umad_dev);

        return;

err:
        while (--i >= s) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;

                ib_umad_kill_port(&umad_dev->ports[i - s]);
        }
free:
        /* balances kref_init */
        ib_umad_dev_put(umad_dev);
}

static void ib_umad_remove_one(struct ib_device *device, void *client_data)
{
        struct ib_umad_device *umad_dev = client_data;
        unsigned int i;

        if (!umad_dev)
                return;

        rdma_for_each_port (device, i) {
                if (rdma_cap_ib_mad(device, i))
                        ib_umad_kill_port(
                                &umad_dev->ports[i - rdma_start_port(device)]);
        }
        /* balances kref_init() */
        ib_umad_dev_put(umad_dev);
}

static int __init ib_umad_init(void)
{
        int ret;

        ret = register_chrdev_region(base_umad_dev,
                                     IB_UMAD_NUM_FIXED_MINOR * 2,
                                     umad_class.name);
        if (ret) {
                pr_err("couldn't register device number\n");
                goto out;
        }

        ret = alloc_chrdev_region(&dynamic_umad_dev, 0,
                                  IB_UMAD_NUM_DYNAMIC_MINOR * 2,
                                  umad_class.name);
        if (ret) {
                pr_err("couldn't register dynamic device number\n");
                goto out_alloc;
        }
        dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR;

        ret = class_register(&umad_class);
        if (ret) {
                pr_err("couldn't create class infiniband_mad\n");
                goto out_chrdev;
        }

        ret = ib_register_client(&umad_client);
        if (ret)
                goto out_class;

        ret = ib_register_client(&issm_client);
        if (ret)
                goto out_client;

        return 0;

out_client:
        ib_unregister_client(&umad_client);
out_class:
        class_unregister(&umad_class);

out_chrdev:
        unregister_chrdev_region(dynamic_umad_dev,
                                 IB_UMAD_NUM_DYNAMIC_MINOR * 2);

out_alloc:
        unregister_chrdev_region(base_umad_dev,
                                 IB_UMAD_NUM_FIXED_MINOR * 2);

out:
        return ret;
}

static void __exit ib_umad_cleanup(void)
{
        ib_unregister_client(&issm_client);
        ib_unregister_client(&umad_client);
        class_unregister(&umad_class);
        unregister_chrdev_region(base_umad_dev,
                                 IB_UMAD_NUM_FIXED_MINOR * 2);
        unregister_chrdev_region(dynamic_umad_dev,
                                 IB_UMAD_NUM_DYNAMIC_MINOR * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);
