/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"
#include "common.h"

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}

static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}
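
/*
 * Tear down a CQ: drop the idr handle first so no new lookups can
 * succeed, then wait for the reference count to fall to zero before
 * destroying the hardware queue and freeing the host structure.
 */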
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE failures,
		 *	incoming RDMA READ REQUEST failures.
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
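
	/*
	 * The queue depth is kept as a power of two: size_log2 is what
	 * the poll and arm paths use to wrap the ring, so round up here.
	 */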
180 */ 181 entries += 16; 182 } 183 entries = roundup_pow_of_two(entries); 184 chp->cq.size_log2 = ilog2(entries); 185 186 if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) { 187 kfree(chp); 188 return ERR_PTR(-ENOMEM); 189 } 190 chp->rhp = rhp; 191 chp->ibcq.cqe = 1 << chp->cq.size_log2; 192 spin_lock_init(&chp->lock); 193 spin_lock_init(&chp->comp_handler_lock); 194 atomic_set(&chp->refcnt, 1); 195 init_waitqueue_head(&chp->wait); 196 if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { 197 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq); 198 kfree(chp); 199 return ERR_PTR(-ENOMEM); 200 } 201 202 if (ucontext) { 203 struct iwch_mm_entry *mm; 204 205 mm = kmalloc(sizeof *mm, GFP_KERNEL); 206 if (!mm) { 207 iwch_destroy_cq(&chp->ibcq); 208 return ERR_PTR(-ENOMEM); 209 } 210 uresp.cqid = chp->cq.cqid; 211 uresp.size_log2 = chp->cq.size_log2; 212 spin_lock(&ucontext->mmap_lock); 213 uresp.key = ucontext->key; 214 ucontext->key += PAGE_SIZE; 215 spin_unlock(&ucontext->mmap_lock); 216 mm->key = uresp.key; 217 mm->addr = virt_to_phys(chp->cq.queue); 218 if (udata->outlen < sizeof uresp) { 219 if (!warned++) 220 printk(KERN_WARNING MOD "Warning - " 221 "downlevel libcxgb3 (non-fatal).\n"); 222 mm->len = PAGE_ALIGN((1UL << uresp.size_log2) * 223 sizeof(struct t3_cqe)); 224 resplen = sizeof(struct iwch_create_cq_resp_v0); 225 } else { 226 mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) * 227 sizeof(struct t3_cqe)); 228 uresp.memsize = mm->len; 229 uresp.reserved = 0; 230 resplen = sizeof uresp; 231 } 232 if (ib_copy_to_udata(udata, &uresp, resplen)) { 233 kfree(mm); 234 iwch_destroy_cq(&chp->ibcq); 235 return ERR_PTR(-EFAULT); 236 } 237 insert_mmap(ucontext, mm); 238 } 239 PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n", 240 chp->cq.cqid, chp, (1 << chp->cq.size_log2), 241 (unsigned long long) chp->cq.dma_addr); 242 return &chp->ibcq; 243} 244 245static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) 246{ 247#ifdef notyet 248 struct iwch_cq *chp = to_iwch_cq(cq); 249 struct t3_cq oldcq, newcq; 250 int ret; 251 252 PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe); 253 254 /* We don't downsize... 
*/ 255 if (cqe <= cq->cqe) 256 return 0; 257 258 /* create new t3_cq with new size */ 259 cqe = roundup_pow_of_two(cqe+1); 260 newcq.size_log2 = ilog2(cqe); 261 262 /* Dont allow resize to less than the current wce count */ 263 if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) { 264 return -ENOMEM; 265 } 266 267 /* Quiesce all QPs using this CQ */ 268 ret = iwch_quiesce_qps(chp); 269 if (ret) { 270 return ret; 271 } 272 273 ret = cxio_create_cq(&chp->rhp->rdev, &newcq); 274 if (ret) { 275 return ret; 276 } 277 278 /* copy CQEs */ 279 memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) * 280 sizeof(struct t3_cqe)); 281 282 /* old iwch_qp gets new t3_cq but keeps old cqid */ 283 oldcq = chp->cq; 284 chp->cq = newcq; 285 chp->cq.cqid = oldcq.cqid; 286 287 /* resize new t3_cq to update the HW context */ 288 ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq); 289 if (ret) { 290 chp->cq = oldcq; 291 return ret; 292 } 293 chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1; 294 295 /* destroy old t3_cq */ 296 oldcq.cqid = newcq.cqid; 297 ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq); 298 if (ret) { 299 printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n", 300 __func__, ret); 301 } 302 303 /* add user hooks here */ 304 305 /* resume qps */ 306 ret = iwch_resume_qps(chp); 307 return ret; 308#else 309 return -ENOSYS; 310#endif 311} 312 313static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) 314{ 315 struct iwch_dev *rhp; 316 struct iwch_cq *chp; 317 enum t3_cq_opcode cq_op; 318 int err; 319 unsigned long flag; 320 u32 rptr; 321 322 chp = to_iwch_cq(ibcq); 323 rhp = chp->rhp; 324 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) 325 cq_op = CQ_ARM_SE; 326 else 327 cq_op = CQ_ARM_AN; 328 if (chp->user_rptr_addr) { 329 if (get_user(rptr, chp->user_rptr_addr)) 330 return -EFAULT; 331 spin_lock_irqsave(&chp->lock, flag); 332 chp->cq.rptr = rptr; 333 } else 334 spin_lock_irqsave(&chp->lock, flag); 335 PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr); 336 err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0); 337 spin_unlock_irqrestore(&chp->lock, flag); 338 if (err < 0) 339 printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err, 340 chp->cq.cqid); 341 if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS)) 342 err = 0; 343 return err; 344} 345 346static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) 347{ 348 int len = vma->vm_end - vma->vm_start; 349 u32 key = vma->vm_pgoff << PAGE_SHIFT; 350 struct cxio_rdev *rdev_p; 351 int ret = 0; 352 struct iwch_mm_entry *mm; 353 struct iwch_ucontext *ucontext; 354 u64 addr; 355 356 PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff, 357 key, len); 358 359 if (vma->vm_start & (PAGE_SIZE-1)) { 360 return -EINVAL; 361 } 362 363 rdev_p = &(to_iwch_dev(context->device)->rdev); 364 ucontext = to_iwch_ucontext(context); 365 366 mm = remove_mmap(ucontext, key, len); 367 if (!mm) 368 return -EINVAL; 369 addr = mm->addr; 370 kfree(mm); 371 372 if ((addr >= rdev_p->rnic_info.udbell_physbase) && 373 (addr < (rdev_p->rnic_info.udbell_physbase + 374 rdev_p->rnic_info.udbell_len))) { 375 376 /* 377 * Map T3 DB register. 378 */ 379 if (vma->vm_flags & VM_READ) { 380 return -EPERM; 381 } 382 383 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 384 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 385 vma->vm_flags &= ~VM_MAYREAD; 386 ret = io_remap_pfn_range(vma, vma->vm_start, 387 addr >> PAGE_SHIFT, 388 len, vma->vm_page_prot); 389 } else { 390 391 /* 392 * Map WQ or CQ contig dma memory... 
393 */ 394 ret = remap_pfn_range(vma, vma->vm_start, 395 addr >> PAGE_SHIFT, 396 len, vma->vm_page_prot); 397 } 398 399 return ret; 400} 401 402static int iwch_deallocate_pd(struct ib_pd *pd) 403{ 404 struct iwch_dev *rhp; 405 struct iwch_pd *php; 406 407 php = to_iwch_pd(pd); 408 rhp = php->rhp; 409 PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid); 410 cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid); 411 kfree(php); 412 return 0; 413} 414 415static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev, 416 struct ib_ucontext *context, 417 struct ib_udata *udata) 418{ 419 struct iwch_pd *php; 420 u32 pdid; 421 struct iwch_dev *rhp; 422 423 PDBG("%s ibdev %p\n", __func__, ibdev); 424 rhp = (struct iwch_dev *) ibdev; 425 pdid = cxio_hal_get_pdid(rhp->rdev.rscp); 426 if (!pdid) 427 return ERR_PTR(-EINVAL); 428 php = kzalloc(sizeof(*php), GFP_KERNEL); 429 if (!php) { 430 cxio_hal_put_pdid(rhp->rdev.rscp, pdid); 431 return ERR_PTR(-ENOMEM); 432 } 433 php->pdid = pdid; 434 php->rhp = rhp; 435 if (context) { 436 if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) { 437 iwch_deallocate_pd(&php->ibpd); 438 return ERR_PTR(-EFAULT); 439 } 440 } 441 PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php); 442 return &php->ibpd; 443} 444 445static int iwch_dereg_mr(struct ib_mr *ib_mr) 446{ 447 struct iwch_dev *rhp; 448 struct iwch_mr *mhp; 449 u32 mmid; 450 451 PDBG("%s ib_mr %p\n", __func__, ib_mr); 452 /* There can be no memory windows */ 453 if (atomic_read(&ib_mr->usecnt)) 454 return -EINVAL; 455 456 mhp = to_iwch_mr(ib_mr); 457 rhp = mhp->rhp; 458 mmid = mhp->attr.stag >> 8; 459 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, 460 mhp->attr.pbl_addr); 461 iwch_free_pbl(mhp); 462 remove_handle(rhp, &rhp->mmidr, mmid); 463 if (mhp->kva) 464 kfree((void *) (unsigned long) mhp->kva); 465 if (mhp->umem) 466 ib_umem_release(mhp->umem); 467 PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp); 468 kfree(mhp); 469 return 0; 470} 471 472static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd, 473 struct ib_phys_buf *buffer_list, 474 int num_phys_buf, 475 int acc, 476 u64 *iova_start) 477{ 478 __be64 *page_list; 479 int shift; 480 u64 total_size; 481 int npages; 482 struct iwch_dev *rhp; 483 struct iwch_pd *php; 484 struct iwch_mr *mhp; 485 int ret; 486 487 PDBG("%s ib_pd %p\n", __func__, pd); 488 php = to_iwch_pd(pd); 489 rhp = php->rhp; 490 491 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); 492 if (!mhp) 493 return ERR_PTR(-ENOMEM); 494 495 mhp->rhp = rhp; 496 497 /* First check that we have enough alignment */ 498 if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) { 499 ret = -EINVAL; 500 goto err; 501 } 502 503 if (num_phys_buf > 1 && 504 ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) { 505 ret = -EINVAL; 506 goto err; 507 } 508 509 ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start, 510 &total_size, &npages, &shift, &page_list); 511 if (ret) 512 goto err; 513 514 ret = iwch_alloc_pbl(mhp, npages); 515 if (ret) { 516 kfree(page_list); 517 goto err_pbl; 518 } 519 520 ret = iwch_write_pbl(mhp, page_list, npages, 0); 521 kfree(page_list); 522 if (ret) 523 goto err_pbl; 524 525 mhp->attr.pdid = php->pdid; 526 mhp->attr.zbva = 0; 527 528 mhp->attr.perms = iwch_ib_to_tpt_access(acc); 529 mhp->attr.va_fbo = *iova_start; 530 mhp->attr.page_size = shift - 12; 531 532 mhp->attr.len = (u32) total_size; 533 mhp->attr.pbl_size = npages; 534 ret = iwch_register_mem(rhp, php, mhp, shift); 535 if (ret) 536 goto err_pbl; 537 538 return &mhp->ibmr; 539 
err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);

}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				     int mr_rereg_mask,
				     struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf,
				     int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
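
/*
 * Register a user memory region: pin the pages with ib_umem_get(),
 * then stream their DMA addresses into the adapter's PBL one
 * page-sized chunk at a time so only a single scratch page is needed.
 */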
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct scatterlist *sg;
	PDBG("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
						 mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = iwch_write_pbl(mhp, pages, i, n);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = -EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}
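
/*
 * Fast-register MRs: allocate the STag and PBL up front; the page list
 * itself is supplied later by a fast-register work request posted on
 * the SQ.
 */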
static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;	/* don't return ERR_PTR(0), i.e. NULL */
		goto err;
	}

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
					struct ib_device *device,
					int page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
			    GFP_KERNEL);
	if (!page_list)
		return ERR_PTR(-ENOMEM);

	page_list->page_list = (u64 *)(page_list + 1);
	page_list->max_page_list_len = page_list_len;

	return page_list;
}

static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
{
	kfree(page_list);
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}
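
/*
 * Create an RC QP.  The hardware wants power-of-two ring sizes, so the
 * requested SQ/RQ depths are rounded up before carving the combined
 * work queue; the rounded sizes are reported back through attrs->cap.
 */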
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are, e.g. Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should set them.
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}
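
/*
 * Only state transitions and access-flag changes are translated into
 * the driver's attribute mask; everything else in ib_qp_attr is
 * ignored here.
 */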
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}


static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}

static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{

	struct iwch_dev *dev;
	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}
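
/*
 * Port state is derived from the underlying net_device: no carrier
 * means DOWN, carrier with an IP address means ACTIVE, otherwise INIT.
 * The active MTU is the largest IB MTU value not exceeding the netdev
 * MTU.
 */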
static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = IB_SPEED_DDR;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}
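
/*
 * Protocol statistics come straight from the T3 MIB; each 64-bit
 * counter is assembled from the hi/lo register pair reported by the
 * LLD's RDMA_GET_MIB control call.
 */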
static int iwch_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	memset(stats, 0, sizeof *stats);
	stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
				m.ipInReceive_lo;
	stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
				  m.ipInHdrErrors_lo;
	stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
				   m.ipInAddrErrors_lo;
	stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
				      m.ipInUnknownProtos_lo;
	stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
				 m.ipInDiscards_lo;
	stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
				 m.ipInDelivers_lo;
	stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
				  m.ipOutRequests_lo;
	stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
				  m.ipOutDiscards_lo;
	stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
				  m.ipOutNoRoutes_lo;
	stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
	stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
	stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
	stats->iw.ipReasmFails = (u64) m.ipReasmFails;
	stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
	stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
	stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
	stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
	stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
	stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
	stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
			      m.tcpInSegs_lo;
	stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
			       m.tcpOutSegs_lo;
	stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
				   m.tcpRetransSeg_lo;
	stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
			      m.tcpInErrs_lo;
	stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
	stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
	return 0;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};
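
/*
 * Fill in the ib_device and iw_cm function tables and register with
 * the IB core; the sysfs attribute files are created only after
 * registration succeeds.
 */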
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
	dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
	dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.get_protocol_stats = iwch_get_mib;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;
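
	/* Expose hw_rev/fw_ver/hca_type/board_id through sysfs. */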
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret) {
			goto bail2;
		}
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}