/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/if_arp.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include "c2.h"
#include "c2_provider.h"
#include "c2_user.h"

static int c2_query_device(struct ib_device *ibdev,
			   struct ib_device_attr *props)
{
	struct c2_dev *c2dev = to_c2dev(ibdev);

	pr_debug("%s:%u\n", __func__, __LINE__);

	*props = c2dev->props;
	return 0;
}

static int c2_query_port(struct ib_device *ibdev,
			 u8 port, struct ib_port_attr *props)
{
	pr_debug("%s:%u\n", __func__, __LINE__);

	props->max_mtu = IB_MTU_4096;
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = IB_PORT_ACTIVE;
	props->phys_state = 0;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->qkey_viol_cntr = 0;
	props->active_width = 1;
	props->active_speed = IB_SPEED_SDR;

	return 0;
}

static int c2_query_pkey(struct ib_device *ibdev,
			 u8 port, u16 index, u16 *pkey)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	*pkey = 0;
	return 0;
}

static int c2_query_gid(struct ib_device *ibdev, u8 port,
			int index, union ib_gid *gid)
{
	struct c2_dev *c2dev = to_c2dev(ibdev);

	pr_debug("%s:%u\n", __func__, __LINE__);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);

	return 0;
}
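
/*
 * Note: iWARP devices have no fabric-assigned GIDs; the GID reported
 * above is simply the 6-byte Ethernet MAC address of the pseudo
 * interface, zero-padded out to the 16-byte union ib_gid.
 */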

/* Allocate the user context data structure. This keeps track
 * of all objects associated with a particular user-mode client.
 */
static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
					     struct ib_udata *udata)
{
	struct c2_ucontext *context;

	pr_debug("%s:%u\n", __func__, __LINE__);
	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	return &context->ibucontext;
}

static int c2_dealloc_ucontext(struct ib_ucontext *context)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	kfree(context);
	return 0;
}

static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return -ENOSYS;
}

static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct c2_pd *pd;
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
			c2_pd_free(to_c2dev(ibdev), pd);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int c2_dealloc_pd(struct ib_pd *pd)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
	kfree(pd);

	return 0;
}

static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return ERR_PTR(-ENOSYS);
}

static int c2_ah_destroy(struct ib_ah *ah)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return -ENOSYS;
}

static void c2_add_ref(struct ib_qp *ibqp)
{
	struct c2_qp *qp;

	BUG_ON(!ibqp);
	qp = to_c2qp(ibqp);
	atomic_inc(&qp->refcount);
}

static void c2_rem_ref(struct ib_qp *ibqp)
{
	struct c2_qp *qp;

	BUG_ON(!ibqp);
	qp = to_c2qp(ibqp);
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
{
	struct c2_dev *c2dev = to_c2dev(device);
	struct c2_qp *qp;

	qp = c2_find_qpn(c2dev, qpn);
	pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
		 __func__, qp, qpn, device,
		 (qp ? atomic_read(&qp->refcount) : 0));

	return (qp ? &qp->ibqp : NULL);
}
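
/*
 * iw_cm QP reference counting: the connection manager takes a
 * reference with add_ref() while it holds a pointer to the QP and
 * drops it with rem_ref().  The final rem_ref() wakes qp->wait,
 * which the QP free path (in c2_qp.c) is expected to sleep on until
 * all CM references are gone.
 */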

static struct ib_qp *c2_create_qp(struct ib_pd *pd,
				  struct ib_qp_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct c2_qp *qp;
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp) {
			pr_debug("%s: Unable to allocate QP\n", __func__);
			return ERR_PTR(-ENOMEM);
		}
		spin_lock_init(&qp->lock);
		if (pd->uobject) {
			/* userspace specific */
		}

		err = c2_alloc_qp(to_c2dev(pd->device),
				  to_c2pd(pd), init_attr, qp);

		if (err && pd->uobject) {
			/* userspace specific */
		}

		break;
	default:
		pr_debug("%s: Invalid QP type: %d\n", __func__,
			 init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	return &qp->ibqp;
}

static int c2_destroy_qp(struct ib_qp *ib_qp)
{
	struct c2_qp *qp = to_c2qp(ib_qp);

	pr_debug("%s:%u qp=%p,qp->state=%d\n",
		 __func__, __LINE__, ib_qp, qp->state);
	c2_free_qp(to_c2dev(ib_qp->device), qp);
	kfree(qp);
	return 0;
}

static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries,
				  int vector, struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct c2_cq *cq;
	int err;

	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		pr_debug("%s: Unable to allocate CQ\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
	if (err) {
		pr_debug("%s: error initializing CQ\n", __func__);
		kfree(cq);
		return ERR_PTR(err);
	}

	return &cq->ibcq;
}

static int c2_destroy_cq(struct ib_cq *ib_cq)
{
	struct c2_cq *cq = to_c2cq(ib_cq);

	pr_debug("%s:%u\n", __func__, __LINE__);

	c2_free_cq(to_c2dev(ib_cq->device), cq);
	kfree(cq);

	return 0;
}

static inline u32 c2_convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
	       C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
}
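
/*
 * Worked example for c2_convert_access() above:
 * acc = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE yields
 * C2_ACF_LOCAL_WRITE | C2_ACF_REMOTE_WRITE | C2_ACF_LOCAL_READ |
 * C2_ACF_WINDOW_BIND.  Local-read and window-bind rights are granted
 * unconditionally; only the write and remote-read bits track the
 * caller's IB access flags.
 */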

static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf, int acc, u64 *iova_start)
{
	struct c2_mr *mr;
	u64 *page_list;
	u32 total_len;
	int err, i, j, k, page_shift, pbl_depth;

	pbl_depth = 0;
	total_len = 0;

	page_shift = PAGE_SHIFT;
	/*
	 * If there is only 1 buffer we assume this could
	 * be a map of all phy mem...use a 32k page_shift.
	 */
	if (num_phys_buf == 1)
		page_shift += 3;

	for (i = 0; i < num_phys_buf; i++) {

		if (buffer_list[i].addr & ~PAGE_MASK) {
			pr_debug("Unaligned Memory Buffer: 0x%llx\n",
				 (unsigned long long) buffer_list[i].addr);
			return ERR_PTR(-EINVAL);
		}

		if (!buffer_list[i].size) {
			pr_debug("Invalid Buffer Size\n");
			return ERR_PTR(-EINVAL);
		}

		total_len += buffer_list[i].size;
		pbl_depth += ALIGN(buffer_list[i].size,
				   (1 << page_shift)) >> page_shift;
	}

	page_list = vmalloc(sizeof(u64) * pbl_depth);
	if (!page_list) {
		pr_debug("couldn't vmalloc page_list of size %zd\n",
			 (sizeof(u64) * pbl_depth));
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0, j = 0; i < num_phys_buf; i++) {

		int naddrs;

		naddrs = ALIGN(buffer_list[i].size,
			       (1 << page_shift)) >> page_shift;
		for (k = 0; k < naddrs; k++)
			page_list[j++] = (buffer_list[i].addr +
					  (k << page_shift));
	}

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		vfree(page_list);
		return ERR_PTR(-ENOMEM);
	}

	mr->pd = to_c2pd(ib_pd);
	mr->umem = NULL;
	pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
		 "*iova_start %llx, first pa %llx, last pa %llx\n",
		 __func__, page_shift, pbl_depth, total_len,
		 (unsigned long long) *iova_start,
		 (unsigned long long) page_list[0],
		 (unsigned long long) page_list[pbl_depth-1]);
	err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list,
					 (1 << page_shift), pbl_depth,
					 total_len, 0, iova_start,
					 c2_convert_access(acc), mr);
	vfree(page_list);
	if (err) {
		kfree(mr);
		return ERR_PTR(err);
	}

	return &mr->ibmr;
}

static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva = 0;

	pr_debug("%s:%u\n", __func__, __LINE__);

	/* AMSO1100 limit */
	bl.size = 0xffffffff;
	bl.addr = 0;
	return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
}
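
/*
 * Worked example of the page-list sizing above: the DMA MR maps a
 * single buffer of 0xffffffff bytes, so the single-buffer heuristic
 * raises page_shift from 12 (4 KB pages, on a 4 KB-page system) to
 * 15 (32 KB pages).  pbl_depth = ALIGN(0xffffffff, 1 << 15) >> 15 =
 * 131072 entries, i.e. a 1 MB page list instead of the 8 MB that
 * 4 KB pages would require.
 */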

static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				    u64 virt, int acc, struct ib_udata *udata)
{
	u64 *pages;
	u64 kva = 0;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct scatterlist *sg;
	struct c2_pd *c2pd = to_c2pd(pd);
	struct c2_mr *c2mr;

	pr_debug("%s:%u\n", __func__, __LINE__);

	c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
	if (!c2mr)
		return ERR_PTR(-ENOMEM);
	c2mr->pd = c2pd;

	c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(c2mr->umem)) {
		err = PTR_ERR(c2mr->umem);
		kfree(c2mr);
		return ERR_PTR(err);
	}

	shift = ffs(c2mr->umem->page_size) - 1;
	n = c2mr->umem->nmap;

	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err;
	}

	i = 0;
	for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] =
			    sg_dma_address(sg) +
			    (c2mr->umem->page_size * k);
		}
	}

	kva = virt;
	err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
					 pages,
					 c2mr->umem->page_size,
					 i,
					 length,
					 ib_umem_offset(c2mr->umem),
					 &kva,
					 c2_convert_access(acc),
					 c2mr);
	kfree(pages);
	if (err)
		goto err;
	return &c2mr->ibmr;

err:
	ib_umem_release(c2mr->umem);
	kfree(c2mr);
	return ERR_PTR(err);
}

static int c2_dereg_mr(struct ib_mr *ib_mr)
{
	struct c2_mr *mr = to_c2mr(ib_mr);
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);

	err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
	if (err)
		pr_debug("c2_stag_dealloc failed: %d\n", err);
	else {
		if (mr->umem)
			ib_umem_release(mr->umem);
		kfree(mr);
	}

	return err;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);

	pr_debug("%s:%u\n", __func__, __LINE__);
	return sprintf(buf, "%x\n", c2dev->props.hw_ver);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);

	pr_debug("%s:%u\n", __func__, __LINE__);
	return sprintf(buf, "%x.%x.%x\n",
		       (int) (c2dev->props.fw_ver >> 32),
		       (int) (c2dev->props.fw_ver >> 16) & 0xffff,
		       (int) (c2dev->props.fw_ver & 0xffff));
}
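
/*
 * show_fw_ver() above unpacks a firmware version stored as one u64:
 * bits 63:32 major, 31:16 minor, 15:0 patch, each printed in hex.
 * For example, fw_ver = 0x0000000300020001 is shown as "3.2.1".
 */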

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return sprintf(buf, "AMSO1100\n");
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *c2_dev_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};

static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	int err;

	err = c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
			   attr_mask);

	return err;
}

static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return -ENOSYS;
}

static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return -ENOSYS;
}

static int c2_process_mad(struct ib_device *ibdev,
			  int mad_flags,
			  u8 port_num,
			  struct ib_wc *in_wc,
			  struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	return -ENOSYS;
}

static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	pr_debug("%s:%u\n", __func__, __LINE__);

	/* Request a connection */
	return c2_llp_connect(cm_id, iw_param);
}

static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	pr_debug("%s:%u\n", __func__, __LINE__);

	/* Accept the new connection */
	return c2_llp_accept(cm_id, iw_param);
}

static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);

	err = c2_llp_reject(cm_id, pdata, pdata_len);
	return err;
}

static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
{
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);
	err = c2_llp_service_create(cm_id, backlog);
	pr_debug("%s:%u err=%d\n",
		 __func__, __LINE__,
		 err);
	return err;
}

static int c2_service_destroy(struct iw_cm_id *cm_id)
{
	int err;

	pr_debug("%s:%u\n", __func__, __LINE__);

	err = c2_llp_service_destroy(cm_id);

	return err;
}
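
/*
 * The functions below manage the "iwX" pseudo network device.  It
 * carries no traffic (its xmit handler just frees the skb); it exists
 * so that IPv4 addresses can be assigned to the RDMA interface
 * independently of the Ethernet netdev.  Bringing the interface up or
 * down walks its address list and mirrors each entry to the RNIC via
 * c2_add_addr()/c2_del_addr().
 */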

static int c2_pseudo_up(struct net_device *netdev)
{
	struct in_device *ind;
	struct c2_dev *c2dev = netdev->ml_priv;

	ind = in_dev_get(netdev);
	if (!ind)
		return 0;

	pr_debug("adding...\n");
	for_ifa(ind) {
#ifdef DEBUG
		u8 *ip = (u8 *) &ifa->ifa_address;

		pr_debug("%s: %d.%d.%d.%d\n",
			 ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
#endif
		c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
	}
	endfor_ifa(ind);
	in_dev_put(ind);

	return 0;
}

static int c2_pseudo_down(struct net_device *netdev)
{
	struct in_device *ind;
	struct c2_dev *c2dev = netdev->ml_priv;

	ind = in_dev_get(netdev);
	if (!ind)
		return 0;

	pr_debug("deleting...\n");
	for_ifa(ind) {
#ifdef DEBUG
		u8 *ip = (u8 *) &ifa->ifa_address;

		pr_debug("%s: %d.%d.%d.%d\n",
			 ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
#endif
		c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
	}
	endfor_ifa(ind);
	in_dev_put(ind);

	return 0;
}

static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
{
	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
		return -EINVAL;

	netdev->mtu = new_mtu;

	/* TODO: Tell rnic about new rdma interface mtu */
	return 0;
}

static const struct net_device_ops c2_pseudo_netdev_ops = {
	.ndo_open = c2_pseudo_up,
	.ndo_stop = c2_pseudo_down,
	.ndo_start_xmit = c2_pseudo_xmit_frame,
	.ndo_change_mtu = c2_pseudo_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
};

static void setup(struct net_device *netdev)
{
	netdev->netdev_ops = &c2_pseudo_netdev_ops;

	netdev->watchdog_timeo = 0;
	netdev->type = ARPHRD_ETHER;
	netdev->mtu = 1500;
	netdev->hard_header_len = ETH_HLEN;
	netdev->addr_len = ETH_ALEN;
	netdev->tx_queue_len = 0;
	netdev->flags |= IFF_NOARP;
}

static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
{
	char name[IFNAMSIZ];
	struct net_device *netdev;

	/* change ethxxx to iwxxx */
	strcpy(name, "iw");
	strcat(name, &c2dev->netdev->name[3]);
	netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, setup);
	if (!netdev) {
		printk(KERN_ERR PFX "%s - etherdev alloc failed\n",
		       __func__);
		return NULL;
	}

	netdev->ml_priv = c2dev;

	SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

	memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);

	/* Print out the MAC address */
	pr_debug("%s: MAC %pM\n", netdev->name, netdev->dev_addr);

#if 0
	/* Disable network packets */
	netif_stop_queue(netdev);
#endif
	return netdev;
}

int c2_register_device(struct c2_dev *dev)
{
	int ret = -ENOMEM;
	int i;

	/* Register pseudo network device */
	dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
	if (!dev->pseudo_netdev)
		goto out;

	ret = register_netdev(dev->pseudo_netdev);
	if (ret)
		goto out_free_netdev;

	pr_debug("%s:%u\n", __func__, __LINE__);
	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);

	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);

	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &dev->pcidev->dev;
	dev->ibdev.query_device = c2_query_device;
	dev->ibdev.query_port = c2_query_port;
	dev->ibdev.query_pkey = c2_query_pkey;
	dev->ibdev.query_gid = c2_query_gid;
	dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
	dev->ibdev.mmap = c2_mmap_uar;
	dev->ibdev.alloc_pd = c2_alloc_pd;
	dev->ibdev.dealloc_pd = c2_dealloc_pd;
	dev->ibdev.create_ah = c2_ah_create;
	dev->ibdev.destroy_ah = c2_ah_destroy;
	dev->ibdev.create_qp = c2_create_qp;
	dev->ibdev.modify_qp = c2_modify_qp;
	dev->ibdev.destroy_qp = c2_destroy_qp;
	dev->ibdev.create_cq = c2_create_cq;
	dev->ibdev.destroy_cq = c2_destroy_cq;
	dev->ibdev.poll_cq = c2_poll_cq;
	dev->ibdev.get_dma_mr = c2_get_dma_mr;
	dev->ibdev.reg_phys_mr = c2_reg_phys_mr;
	dev->ibdev.reg_user_mr = c2_reg_user_mr;
	dev->ibdev.dereg_mr = c2_dereg_mr;

	dev->ibdev.alloc_fmr = NULL;
	dev->ibdev.unmap_fmr = NULL;
	dev->ibdev.dealloc_fmr = NULL;
	dev->ibdev.map_phys_fmr = NULL;

	dev->ibdev.attach_mcast = c2_multicast_attach;
	dev->ibdev.detach_mcast = c2_multicast_detach;
	dev->ibdev.process_mad = c2_process_mad;

	dev->ibdev.req_notify_cq = c2_arm_cq;
	dev->ibdev.post_send = c2_post_send;
	dev->ibdev.post_recv = c2_post_receive;

	dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
	if (dev->ibdev.iwcm == NULL) {
		ret = -ENOMEM;
		goto out_unregister_netdev;
	}
	dev->ibdev.iwcm->add_ref = c2_add_ref;
	dev->ibdev.iwcm->rem_ref = c2_rem_ref;
	dev->ibdev.iwcm->get_qp = c2_get_qp;
	dev->ibdev.iwcm->connect = c2_connect;
	dev->ibdev.iwcm->accept = c2_accept;
	dev->ibdev.iwcm->reject = c2_reject;
	dev->ibdev.iwcm->create_listen = c2_service_create;
	dev->ibdev.iwcm->destroy_listen = c2_service_destroy;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto out_free_iwcm;

	for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 c2_dev_attributes[i]);
		if (ret)
			goto out_unregister_ibdev;
	}
	goto out;

out_unregister_ibdev:
	ib_unregister_device(&dev->ibdev);
out_free_iwcm:
	kfree(dev->ibdev.iwcm);
out_unregister_netdev:
	unregister_netdev(dev->pseudo_netdev);
out_free_netdev:
	free_netdev(dev->pseudo_netdev);
out:
	pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
	return ret;
}
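
/*
 * Note the unwind ladder above: each label releases what was acquired
 * before the failing step, in reverse order of acquisition (ib device
 * -> iwcm ops -> registered netdev -> allocated netdev), so a failure
 * at any step should leave no partially-registered state behind.
 */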

void c2_unregister_device(struct c2_dev *dev)
{
	pr_debug("%s:%u\n", __func__, __LINE__);
	unregister_netdev(dev->pseudo_netdev);
	free_netdev(dev->pseudo_netdev);
	ib_unregister_device(&dev->ibdev);
}