1/* 2 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. 3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. 4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 35#include <rdma/ib_mad.h> 36#include <rdma/ib_user_verbs.h> 37#include <linux/io.h> 38#include <linux/module.h> 39#include <linux/utsname.h> 40#include <linux/rculist.h> 41#include <linux/mm.h> 42#include <linux/random.h> 43#include <linux/vmalloc.h> 44 45#include "qib.h" 46#include "qib_common.h" 47 48static unsigned int ib_qib_qp_table_size = 256; 49module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); 50MODULE_PARM_DESC(qp_table_size, "QP table size"); 51 52unsigned int ib_qib_lkey_table_size = 16; 53module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint, 54 S_IRUGO); 55MODULE_PARM_DESC(lkey_table_size, 56 "LKEY table size in bits (2^n, 1 <= n <= 23)"); 57 58static unsigned int ib_qib_max_pds = 0xFFFF; 59module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO); 60MODULE_PARM_DESC(max_pds, 61 "Maximum number of protection domains to support"); 62 63static unsigned int ib_qib_max_ahs = 0xFFFF; 64module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO); 65MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); 66 67unsigned int ib_qib_max_cqes = 0x2FFFF; 68module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO); 69MODULE_PARM_DESC(max_cqes, 70 "Maximum number of completion queue entries to support"); 71 72unsigned int ib_qib_max_cqs = 0x1FFFF; 73module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO); 74MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); 75 76unsigned int ib_qib_max_qp_wrs = 0x3FFF; 77module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO); 78MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); 79 80unsigned int ib_qib_max_qps = 16384; 81module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO); 82MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); 83 84unsigned int ib_qib_max_sges = 0x60; 85module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO); 86MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); 87 88unsigned int ib_qib_max_mcast_grps = 16384; 89module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO); 90MODULE_PARM_DESC(max_mcast_grps, 91 "Maximum number of multicast groups to support"); 92 93unsigned int ib_qib_max_mcast_qp_attached = 16; 94module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached, 95 uint, S_IRUGO); 96MODULE_PARM_DESC(max_mcast_qp_attached, 97 "Maximum number of attached QPs to support"); 98 99unsigned int ib_qib_max_srqs = 1024; 100module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO); 101MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); 102 103unsigned int ib_qib_max_srq_sges = 128; 104module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO); 105MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); 106 107unsigned int ib_qib_max_srq_wrs = 0x1FFFF; 108module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO); 109MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); 110 111static unsigned int ib_qib_disable_sma; 112module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO); 113MODULE_PARM_DESC(disable_sma, "Disable the SMA"); 114 115/* 116 * Note that it is OK to post send work requests in the SQE and ERR 117 * states; qib_do_send() will process them and generate error 118 * completions as per IB 1.2 C10-96. 119 */ 120const int ib_qib_state_ops[IB_QPS_ERR + 1] = { 121 [IB_QPS_RESET] = 0, 122 [IB_QPS_INIT] = QIB_POST_RECV_OK, 123 [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK, 124 [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | 125 QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK | 126 QIB_PROCESS_NEXT_SEND_OK, 127 [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | 128 QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK, 129 [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | 130 QIB_POST_SEND_OK | QIB_FLUSH_SEND, 131 [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV | 132 QIB_POST_SEND_OK | QIB_FLUSH_SEND, 133}; 134 135struct qib_ucontext { 136 struct ib_ucontext ibucontext; 137}; 138 139static inline struct qib_ucontext *to_iucontext(struct ib_ucontext 140 *ibucontext) 141{ 142 return container_of(ibucontext, struct qib_ucontext, ibucontext); 143} 144 145/* 146 * Translate ib_wr_opcode into ib_wc_opcode. 147 */ 148const enum ib_wc_opcode ib_qib_wc_opcode[] = { 149 [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, 150 [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, 151 [IB_WR_SEND] = IB_WC_SEND, 152 [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, 153 [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, 154 [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, 155 [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD 156}; 157 158/* 159 * System image GUID. 160 */ 161__be64 ib_qib_sys_image_guid; 162 163/** 164 * qib_copy_sge - copy data to SGE memory 165 * @ss: the SGE state 166 * @data: the data to copy 167 * @length: the length of the data 168 */ 169void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) 170{ 171 struct qib_sge *sge = &ss->sge; 172 173 while (length) { 174 u32 len = sge->length; 175 176 if (len > length) 177 len = length; 178 if (len > sge->sge_length) 179 len = sge->sge_length; 180 BUG_ON(len == 0); 181 memcpy(sge->vaddr, data, len); 182 sge->vaddr += len; 183 sge->length -= len; 184 sge->sge_length -= len; 185 if (sge->sge_length == 0) { 186 if (release) 187 qib_put_mr(sge->mr); 188 if (--ss->num_sge) 189 *sge = *ss->sg_list++; 190 } else if (sge->length == 0 && sge->mr->lkey) { 191 if (++sge->n >= QIB_SEGSZ) { 192 if (++sge->m >= sge->mr->mapsz) 193 break; 194 sge->n = 0; 195 } 196 sge->vaddr = 197 sge->mr->map[sge->m]->segs[sge->n].vaddr; 198 sge->length = 199 sge->mr->map[sge->m]->segs[sge->n].length; 200 } 201 data += len; 202 length -= len; 203 } 204} 205 206/** 207 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func 208 * @ss: the SGE state 209 * @length: the number of bytes to skip 210 */ 211void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) 212{ 213 struct qib_sge *sge = &ss->sge; 214 215 while (length) { 216 u32 len = sge->length; 217 218 if (len > length) 219 len = length; 220 if (len > sge->sge_length) 221 len = sge->sge_length; 222 BUG_ON(len == 0); 223 sge->vaddr += len; 224 sge->length -= len; 225 sge->sge_length -= len; 226 if (sge->sge_length == 0) { 227 if (release) 228 qib_put_mr(sge->mr); 229 if (--ss->num_sge) 230 *sge = *ss->sg_list++; 231 } else if (sge->length == 0 && sge->mr->lkey) { 232 if (++sge->n >= QIB_SEGSZ) { 233 if (++sge->m >= sge->mr->mapsz) 234 break; 235 sge->n = 0; 236 } 237 sge->vaddr = 238 sge->mr->map[sge->m]->segs[sge->n].vaddr; 239 sge->length = 240 sge->mr->map[sge->m]->segs[sge->n].length; 241 } 242 length -= len; 243 } 244} 245 246/* 247 * Count the number of DMA descriptors needed to send length bytes of data. 248 * Don't modify the qib_sge_state to get the count. 249 * Return zero if any of the segments is not aligned. 250 */ 251static u32 qib_count_sge(struct qib_sge_state *ss, u32 length) 252{ 253 struct qib_sge *sg_list = ss->sg_list; 254 struct qib_sge sge = ss->sge; 255 u8 num_sge = ss->num_sge; 256 u32 ndesc = 1; /* count the header */ 257 258 while (length) { 259 u32 len = sge.length; 260 261 if (len > length) 262 len = length; 263 if (len > sge.sge_length) 264 len = sge.sge_length; 265 BUG_ON(len == 0); 266 if (((long) sge.vaddr & (sizeof(u32) - 1)) || 267 (len != length && (len & (sizeof(u32) - 1)))) { 268 ndesc = 0; 269 break; 270 } 271 ndesc++; 272 sge.vaddr += len; 273 sge.length -= len; 274 sge.sge_length -= len; 275 if (sge.sge_length == 0) { 276 if (--num_sge) 277 sge = *sg_list++; 278 } else if (sge.length == 0 && sge.mr->lkey) { 279 if (++sge.n >= QIB_SEGSZ) { 280 if (++sge.m >= sge.mr->mapsz) 281 break; 282 sge.n = 0; 283 } 284 sge.vaddr = 285 sge.mr->map[sge.m]->segs[sge.n].vaddr; 286 sge.length = 287 sge.mr->map[sge.m]->segs[sge.n].length; 288 } 289 length -= len; 290 } 291 return ndesc; 292} 293 294/* 295 * Copy from the SGEs to the data buffer. 296 */ 297static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) 298{ 299 struct qib_sge *sge = &ss->sge; 300 301 while (length) { 302 u32 len = sge->length; 303 304 if (len > length) 305 len = length; 306 if (len > sge->sge_length) 307 len = sge->sge_length; 308 BUG_ON(len == 0); 309 memcpy(data, sge->vaddr, len); 310 sge->vaddr += len; 311 sge->length -= len; 312 sge->sge_length -= len; 313 if (sge->sge_length == 0) { 314 if (--ss->num_sge) 315 *sge = *ss->sg_list++; 316 } else if (sge->length == 0 && sge->mr->lkey) { 317 if (++sge->n >= QIB_SEGSZ) { 318 if (++sge->m >= sge->mr->mapsz) 319 break; 320 sge->n = 0; 321 } 322 sge->vaddr = 323 sge->mr->map[sge->m]->segs[sge->n].vaddr; 324 sge->length = 325 sge->mr->map[sge->m]->segs[sge->n].length; 326 } 327 data += len; 328 length -= len; 329 } 330} 331 332/** 333 * qib_post_one_send - post one RC, UC, or UD send work request 334 * @qp: the QP to post on 335 * @wr: the work request to send 336 */ 337static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, 338 int *scheduled) 339{ 340 struct qib_swqe *wqe; 341 u32 next; 342 int i; 343 int j; 344 int acc; 345 int ret; 346 unsigned long flags; 347 struct qib_lkey_table *rkt; 348 struct qib_pd *pd; 349 350 spin_lock_irqsave(&qp->s_lock, flags); 351 352 /* Check that state is OK to post send. */ 353 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))) 354 goto bail_inval; 355 356 /* IB spec says that num_sge == 0 is OK. */ 357 if (wr->num_sge > qp->s_max_sge) 358 goto bail_inval; 359 360 /* 361 * Don't allow RDMA reads or atomic operations on UC or 362 * undefined operations. 363 * Make sure buffer is large enough to hold the result for atomics. 364 */ 365 if (wr->opcode == IB_WR_FAST_REG_MR) { 366 if (qib_fast_reg_mr(qp, wr)) 367 goto bail_inval; 368 } else if (qp->ibqp.qp_type == IB_QPT_UC) { 369 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) 370 goto bail_inval; 371 } else if (qp->ibqp.qp_type != IB_QPT_RC) { 372 /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */ 373 if (wr->opcode != IB_WR_SEND && 374 wr->opcode != IB_WR_SEND_WITH_IMM) 375 goto bail_inval; 376 /* Check UD destination address PD */ 377 if (qp->ibqp.pd != wr->wr.ud.ah->pd) 378 goto bail_inval; 379 } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) 380 goto bail_inval; 381 else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && 382 (wr->num_sge == 0 || 383 wr->sg_list[0].length < sizeof(u64) || 384 wr->sg_list[0].addr & (sizeof(u64) - 1))) 385 goto bail_inval; 386 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) 387 goto bail_inval; 388 389 next = qp->s_head + 1; 390 if (next >= qp->s_size) 391 next = 0; 392 if (next == qp->s_last) { 393 ret = -ENOMEM; 394 goto bail; 395 } 396 397 rkt = &to_idev(qp->ibqp.device)->lk_table; 398 pd = to_ipd(qp->ibqp.pd); 399 wqe = get_swqe_ptr(qp, qp->s_head); 400 wqe->wr = *wr; 401 wqe->length = 0; 402 j = 0; 403 if (wr->num_sge) { 404 acc = wr->opcode >= IB_WR_RDMA_READ ? 405 IB_ACCESS_LOCAL_WRITE : 0; 406 for (i = 0; i < wr->num_sge; i++) { 407 u32 length = wr->sg_list[i].length; 408 int ok; 409 410 if (length == 0) 411 continue; 412 ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j], 413 &wr->sg_list[i], acc); 414 if (!ok) 415 goto bail_inval_free; 416 wqe->length += length; 417 j++; 418 } 419 wqe->wr.num_sge = j; 420 } 421 if (qp->ibqp.qp_type == IB_QPT_UC || 422 qp->ibqp.qp_type == IB_QPT_RC) { 423 if (wqe->length > 0x80000000U) 424 goto bail_inval_free; 425 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport + 426 qp->port_num - 1)->ibmtu) 427 goto bail_inval_free; 428 else 429 atomic_inc(&to_iah(wr->wr.ud.ah)->refcount); 430 wqe->ssn = qp->s_ssn++; 431 qp->s_head = next; 432 433 ret = 0; 434 goto bail; 435 436bail_inval_free: 437 while (j) { 438 struct qib_sge *sge = &wqe->sg_list[--j]; 439 440 qib_put_mr(sge->mr); 441 } 442bail_inval: 443 ret = -EINVAL; 444bail: 445 if (!ret && !wr->next && 446 !qib_sdma_empty( 447 dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) { 448 qib_schedule_send(qp); 449 *scheduled = 1; 450 } 451 spin_unlock_irqrestore(&qp->s_lock, flags); 452 return ret; 453} 454 455/** 456 * qib_post_send - post a send on a QP 457 * @ibqp: the QP to post the send on 458 * @wr: the list of work requests to post 459 * @bad_wr: the first bad WR is put here 460 * 461 * This may be called from interrupt context. 462 */ 463static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 464 struct ib_send_wr **bad_wr) 465{ 466 struct qib_qp *qp = to_iqp(ibqp); 467 int err = 0; 468 int scheduled = 0; 469 470 for (; wr; wr = wr->next) { 471 err = qib_post_one_send(qp, wr, &scheduled); 472 if (err) { 473 *bad_wr = wr; 474 goto bail; 475 } 476 } 477 478 /* Try to do the send work in the caller's context. */ 479 if (!scheduled) 480 qib_do_send(&qp->s_work); 481 482bail: 483 return err; 484} 485 486/** 487 * qib_post_receive - post a receive on a QP 488 * @ibqp: the QP to post the receive on 489 * @wr: the WR to post 490 * @bad_wr: the first bad WR is put here 491 * 492 * This may be called from interrupt context. 493 */ 494static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, 495 struct ib_recv_wr **bad_wr) 496{ 497 struct qib_qp *qp = to_iqp(ibqp); 498 struct qib_rwq *wq = qp->r_rq.wq; 499 unsigned long flags; 500 int ret; 501 502 /* Check that state is OK to post receive. */ 503 if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) { 504 *bad_wr = wr; 505 ret = -EINVAL; 506 goto bail; 507 } 508 509 for (; wr; wr = wr->next) { 510 struct qib_rwqe *wqe; 511 u32 next; 512 int i; 513 514 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { 515 *bad_wr = wr; 516 ret = -EINVAL; 517 goto bail; 518 } 519 520 spin_lock_irqsave(&qp->r_rq.lock, flags); 521 next = wq->head + 1; 522 if (next >= qp->r_rq.size) 523 next = 0; 524 if (next == wq->tail) { 525 spin_unlock_irqrestore(&qp->r_rq.lock, flags); 526 *bad_wr = wr; 527 ret = -ENOMEM; 528 goto bail; 529 } 530 531 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); 532 wqe->wr_id = wr->wr_id; 533 wqe->num_sge = wr->num_sge; 534 for (i = 0; i < wr->num_sge; i++) 535 wqe->sg_list[i] = wr->sg_list[i]; 536 /* Make sure queue entry is written before the head index. */ 537 smp_wmb(); 538 wq->head = next; 539 spin_unlock_irqrestore(&qp->r_rq.lock, flags); 540 } 541 ret = 0; 542 543bail: 544 return ret; 545} 546 547/** 548 * qib_qp_rcv - processing an incoming packet on a QP 549 * @rcd: the context pointer 550 * @hdr: the packet header 551 * @has_grh: true if the packet has a GRH 552 * @data: the packet data 553 * @tlen: the packet length 554 * @qp: the QP the packet came on 555 * 556 * This is called from qib_ib_rcv() to process an incoming packet 557 * for the given QP. 558 * Called at interrupt level. 559 */ 560static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, 561 int has_grh, void *data, u32 tlen, struct qib_qp *qp) 562{ 563 struct qib_ibport *ibp = &rcd->ppd->ibport_data; 564 565 spin_lock(&qp->r_lock); 566 567 /* Check for valid receive state. */ 568 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { 569 ibp->n_pkt_drops++; 570 goto unlock; 571 } 572 573 switch (qp->ibqp.qp_type) { 574 case IB_QPT_SMI: 575 case IB_QPT_GSI: 576 if (ib_qib_disable_sma) 577 break; 578 /* FALLTHROUGH */ 579 case IB_QPT_UD: 580 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp); 581 break; 582 583 case IB_QPT_RC: 584 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp); 585 break; 586 587 case IB_QPT_UC: 588 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp); 589 break; 590 591 default: 592 break; 593 } 594 595unlock: 596 spin_unlock(&qp->r_lock); 597} 598 599/** 600 * qib_ib_rcv - process an incoming packet 601 * @rcd: the context pointer 602 * @rhdr: the header of the packet 603 * @data: the packet payload 604 * @tlen: the packet length 605 * 606 * This is called from qib_kreceive() to process an incoming packet at 607 * interrupt level. Tlen is the length of the header + data + CRC in bytes. 608 */ 609void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) 610{ 611 struct qib_pportdata *ppd = rcd->ppd; 612 struct qib_ibport *ibp = &ppd->ibport_data; 613 struct qib_ib_header *hdr = rhdr; 614 struct qib_other_headers *ohdr; 615 struct qib_qp *qp; 616 u32 qp_num; 617 int lnh; 618 u8 opcode; 619 u16 lid; 620 621 /* 24 == LRH+BTH+CRC */ 622 if (unlikely(tlen < 24)) 623 goto drop; 624 625 /* Check for a valid destination LID (see ch. 7.11.1). */ 626 lid = be16_to_cpu(hdr->lrh[1]); 627 if (lid < QIB_MULTICAST_LID_BASE) { 628 lid &= ~((1 << ppd->lmc) - 1); 629 if (unlikely(lid != ppd->lid)) 630 goto drop; 631 } 632 633 /* Check for GRH */ 634 lnh = be16_to_cpu(hdr->lrh[0]) & 3; 635 if (lnh == QIB_LRH_BTH) 636 ohdr = &hdr->u.oth; 637 else if (lnh == QIB_LRH_GRH) { 638 u32 vtf; 639 640 ohdr = &hdr->u.l.oth; 641 if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) 642 goto drop; 643 vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); 644 if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) 645 goto drop; 646 } else 647 goto drop; 648 649 opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f; 650#ifdef CONFIG_DEBUG_FS 651 rcd->opstats->stats[opcode].n_bytes += tlen; 652 rcd->opstats->stats[opcode].n_packets++; 653#endif 654 655 /* Get the destination QP number. */ 656 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; 657 if (qp_num == QIB_MULTICAST_QPN) { 658 struct qib_mcast *mcast; 659 struct qib_mcast_qp *p; 660 661 if (lnh != QIB_LRH_GRH) 662 goto drop; 663 mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid); 664 if (mcast == NULL) 665 goto drop; 666 this_cpu_inc(ibp->pmastats->n_multicast_rcv); 667 list_for_each_entry_rcu(p, &mcast->qp_list, list) 668 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); 669 /* 670 * Notify qib_multicast_detach() if it is waiting for us 671 * to finish. 672 */ 673 if (atomic_dec_return(&mcast->refcount) <= 1) 674 wake_up(&mcast->wait); 675 } else { 676 if (rcd->lookaside_qp) { 677 if (rcd->lookaside_qpn != qp_num) { 678 if (atomic_dec_and_test( 679 &rcd->lookaside_qp->refcount)) 680 wake_up( 681 &rcd->lookaside_qp->wait); 682 rcd->lookaside_qp = NULL; 683 } 684 } 685 if (!rcd->lookaside_qp) { 686 qp = qib_lookup_qpn(ibp, qp_num); 687 if (!qp) 688 goto drop; 689 rcd->lookaside_qp = qp; 690 rcd->lookaside_qpn = qp_num; 691 } else 692 qp = rcd->lookaside_qp; 693 this_cpu_inc(ibp->pmastats->n_unicast_rcv); 694 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); 695 } 696 return; 697 698drop: 699 ibp->n_pkt_drops++; 700} 701 702/* 703 * This is called from a timer to check for QPs 704 * which need kernel memory in order to send a packet. 705 */ 706static void mem_timer(unsigned long data) 707{ 708 struct qib_ibdev *dev = (struct qib_ibdev *) data; 709 struct list_head *list = &dev->memwait; 710 struct qib_qp *qp = NULL; 711 unsigned long flags; 712 713 spin_lock_irqsave(&dev->pending_lock, flags); 714 if (!list_empty(list)) { 715 qp = list_entry(list->next, struct qib_qp, iowait); 716 list_del_init(&qp->iowait); 717 atomic_inc(&qp->refcount); 718 if (!list_empty(list)) 719 mod_timer(&dev->mem_timer, jiffies + 1); 720 } 721 spin_unlock_irqrestore(&dev->pending_lock, flags); 722 723 if (qp) { 724 spin_lock_irqsave(&qp->s_lock, flags); 725 if (qp->s_flags & QIB_S_WAIT_KMEM) { 726 qp->s_flags &= ~QIB_S_WAIT_KMEM; 727 qib_schedule_send(qp); 728 } 729 spin_unlock_irqrestore(&qp->s_lock, flags); 730 if (atomic_dec_and_test(&qp->refcount)) 731 wake_up(&qp->wait); 732 } 733} 734 735static void update_sge(struct qib_sge_state *ss, u32 length) 736{ 737 struct qib_sge *sge = &ss->sge; 738 739 sge->vaddr += length; 740 sge->length -= length; 741 sge->sge_length -= length; 742 if (sge->sge_length == 0) { 743 if (--ss->num_sge) 744 *sge = *ss->sg_list++; 745 } else if (sge->length == 0 && sge->mr->lkey) { 746 if (++sge->n >= QIB_SEGSZ) { 747 if (++sge->m >= sge->mr->mapsz) 748 return; 749 sge->n = 0; 750 } 751 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; 752 sge->length = sge->mr->map[sge->m]->segs[sge->n].length; 753 } 754} 755 756#ifdef __LITTLE_ENDIAN 757static inline u32 get_upper_bits(u32 data, u32 shift) 758{ 759 return data >> shift; 760} 761 762static inline u32 set_upper_bits(u32 data, u32 shift) 763{ 764 return data << shift; 765} 766 767static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) 768{ 769 data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); 770 data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); 771 return data; 772} 773#else 774static inline u32 get_upper_bits(u32 data, u32 shift) 775{ 776 return data << shift; 777} 778 779static inline u32 set_upper_bits(u32 data, u32 shift) 780{ 781 return data >> shift; 782} 783 784static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) 785{ 786 data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); 787 data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); 788 return data; 789} 790#endif 791 792static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss, 793 u32 length, unsigned flush_wc) 794{ 795 u32 extra = 0; 796 u32 data = 0; 797 u32 last; 798 799 while (1) { 800 u32 len = ss->sge.length; 801 u32 off; 802 803 if (len > length) 804 len = length; 805 if (len > ss->sge.sge_length) 806 len = ss->sge.sge_length; 807 BUG_ON(len == 0); 808 /* If the source address is not aligned, try to align it. */ 809 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); 810 if (off) { 811 u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & 812 ~(sizeof(u32) - 1)); 813 u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE); 814 u32 y; 815 816 y = sizeof(u32) - off; 817 if (len > y) 818 len = y; 819 if (len + extra >= sizeof(u32)) { 820 data |= set_upper_bits(v, extra * 821 BITS_PER_BYTE); 822 len = sizeof(u32) - extra; 823 if (len == length) { 824 last = data; 825 break; 826 } 827 __raw_writel(data, piobuf); 828 piobuf++; 829 extra = 0; 830 data = 0; 831 } else { 832 /* Clear unused upper bytes */ 833 data |= clear_upper_bytes(v, len, extra); 834 if (len == length) { 835 last = data; 836 break; 837 } 838 extra += len; 839 } 840 } else if (extra) { 841 /* Source address is aligned. */ 842 u32 *addr = (u32 *) ss->sge.vaddr; 843 int shift = extra * BITS_PER_BYTE; 844 int ushift = 32 - shift; 845 u32 l = len; 846 847 while (l >= sizeof(u32)) { 848 u32 v = *addr; 849 850 data |= set_upper_bits(v, shift); 851 __raw_writel(data, piobuf); 852 data = get_upper_bits(v, ushift); 853 piobuf++; 854 addr++; 855 l -= sizeof(u32); 856 } 857 /* 858 * We still have 'extra' number of bytes leftover. 859 */ 860 if (l) { 861 u32 v = *addr; 862 863 if (l + extra >= sizeof(u32)) { 864 data |= set_upper_bits(v, shift); 865 len -= l + extra - sizeof(u32); 866 if (len == length) { 867 last = data; 868 break; 869 } 870 __raw_writel(data, piobuf); 871 piobuf++; 872 extra = 0; 873 data = 0; 874 } else { 875 /* Clear unused upper bytes */ 876 data |= clear_upper_bytes(v, l, extra); 877 if (len == length) { 878 last = data; 879 break; 880 } 881 extra += l; 882 } 883 } else if (len == length) { 884 last = data; 885 break; 886 } 887 } else if (len == length) { 888 u32 w; 889 890 /* 891 * Need to round up for the last dword in the 892 * packet. 893 */ 894 w = (len + 3) >> 2; 895 qib_pio_copy(piobuf, ss->sge.vaddr, w - 1); 896 piobuf += w - 1; 897 last = ((u32 *) ss->sge.vaddr)[w - 1]; 898 break; 899 } else { 900 u32 w = len >> 2; 901 902 qib_pio_copy(piobuf, ss->sge.vaddr, w); 903 piobuf += w; 904 905 extra = len & (sizeof(u32) - 1); 906 if (extra) { 907 u32 v = ((u32 *) ss->sge.vaddr)[w]; 908 909 /* Clear unused upper bytes */ 910 data = clear_upper_bytes(v, extra, 0); 911 } 912 } 913 update_sge(ss, len); 914 length -= len; 915 } 916 /* Update address before sending packet. */ 917 update_sge(ss, length); 918 if (flush_wc) { 919 /* must flush early everything before trigger word */ 920 qib_flush_wc(); 921 __raw_writel(last, piobuf); 922 /* be sure trigger word is written */ 923 qib_flush_wc(); 924 } else 925 __raw_writel(last, piobuf); 926} 927 928static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev, 929 struct qib_qp *qp) 930{ 931 struct qib_verbs_txreq *tx; 932 unsigned long flags; 933 934 spin_lock_irqsave(&qp->s_lock, flags); 935 spin_lock(&dev->pending_lock); 936 937 if (!list_empty(&dev->txreq_free)) { 938 struct list_head *l = dev->txreq_free.next; 939 940 list_del(l); 941 spin_unlock(&dev->pending_lock); 942 spin_unlock_irqrestore(&qp->s_lock, flags); 943 tx = list_entry(l, struct qib_verbs_txreq, txreq.list); 944 } else { 945 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK && 946 list_empty(&qp->iowait)) { 947 dev->n_txwait++; 948 qp->s_flags |= QIB_S_WAIT_TX; 949 list_add_tail(&qp->iowait, &dev->txwait); 950 } 951 qp->s_flags &= ~QIB_S_BUSY; 952 spin_unlock(&dev->pending_lock); 953 spin_unlock_irqrestore(&qp->s_lock, flags); 954 tx = ERR_PTR(-EBUSY); 955 } 956 return tx; 957} 958 959static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev, 960 struct qib_qp *qp) 961{ 962 struct qib_verbs_txreq *tx; 963 unsigned long flags; 964 965 spin_lock_irqsave(&dev->pending_lock, flags); 966 /* assume the list non empty */ 967 if (likely(!list_empty(&dev->txreq_free))) { 968 struct list_head *l = dev->txreq_free.next; 969 970 list_del(l); 971 spin_unlock_irqrestore(&dev->pending_lock, flags); 972 tx = list_entry(l, struct qib_verbs_txreq, txreq.list); 973 } else { 974 /* call slow path to get the extra lock */ 975 spin_unlock_irqrestore(&dev->pending_lock, flags); 976 tx = __get_txreq(dev, qp); 977 } 978 return tx; 979} 980 981void qib_put_txreq(struct qib_verbs_txreq *tx) 982{ 983 struct qib_ibdev *dev; 984 struct qib_qp *qp; 985 unsigned long flags; 986 987 qp = tx->qp; 988 dev = to_idev(qp->ibqp.device); 989 990 if (atomic_dec_and_test(&qp->refcount)) 991 wake_up(&qp->wait); 992 if (tx->mr) { 993 qib_put_mr(tx->mr); 994 tx->mr = NULL; 995 } 996 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { 997 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF; 998 dma_unmap_single(&dd_from_dev(dev)->pcidev->dev, 999 tx->txreq.addr, tx->hdr_dwords << 2, 1000 DMA_TO_DEVICE); 1001 kfree(tx->align_buf); 1002 } 1003 1004 spin_lock_irqsave(&dev->pending_lock, flags); 1005 1006 /* Put struct back on free list */ 1007 list_add(&tx->txreq.list, &dev->txreq_free); 1008 1009 if (!list_empty(&dev->txwait)) { 1010 /* Wake up first QP wanting a free struct */ 1011 qp = list_entry(dev->txwait.next, struct qib_qp, iowait); 1012 list_del_init(&qp->iowait); 1013 atomic_inc(&qp->refcount); 1014 spin_unlock_irqrestore(&dev->pending_lock, flags); 1015 1016 spin_lock_irqsave(&qp->s_lock, flags); 1017 if (qp->s_flags & QIB_S_WAIT_TX) { 1018 qp->s_flags &= ~QIB_S_WAIT_TX; 1019 qib_schedule_send(qp); 1020 } 1021 spin_unlock_irqrestore(&qp->s_lock, flags); 1022 1023 if (atomic_dec_and_test(&qp->refcount)) 1024 wake_up(&qp->wait); 1025 } else 1026 spin_unlock_irqrestore(&dev->pending_lock, flags); 1027} 1028 1029/* 1030 * This is called when there are send DMA descriptors that might be 1031 * available. 1032 * 1033 * This is called with ppd->sdma_lock held. 1034 */ 1035void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail) 1036{ 1037 struct qib_qp *qp, *nqp; 1038 struct qib_qp *qps[20]; 1039 struct qib_ibdev *dev; 1040 unsigned i, n; 1041 1042 n = 0; 1043 dev = &ppd->dd->verbs_dev; 1044 spin_lock(&dev->pending_lock); 1045 1046 /* Search wait list for first QP wanting DMA descriptors. */ 1047 list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) { 1048 if (qp->port_num != ppd->port) 1049 continue; 1050 if (n == ARRAY_SIZE(qps)) 1051 break; 1052 if (qp->s_tx->txreq.sg_count > avail) 1053 break; 1054 avail -= qp->s_tx->txreq.sg_count; 1055 list_del_init(&qp->iowait); 1056 atomic_inc(&qp->refcount); 1057 qps[n++] = qp; 1058 } 1059 1060 spin_unlock(&dev->pending_lock); 1061 1062 for (i = 0; i < n; i++) { 1063 qp = qps[i]; 1064 spin_lock(&qp->s_lock); 1065 if (qp->s_flags & QIB_S_WAIT_DMA_DESC) { 1066 qp->s_flags &= ~QIB_S_WAIT_DMA_DESC; 1067 qib_schedule_send(qp); 1068 } 1069 spin_unlock(&qp->s_lock); 1070 if (atomic_dec_and_test(&qp->refcount)) 1071 wake_up(&qp->wait); 1072 } 1073} 1074 1075/* 1076 * This is called with ppd->sdma_lock held. 1077 */ 1078static void sdma_complete(struct qib_sdma_txreq *cookie, int status) 1079{ 1080 struct qib_verbs_txreq *tx = 1081 container_of(cookie, struct qib_verbs_txreq, txreq); 1082 struct qib_qp *qp = tx->qp; 1083 1084 spin_lock(&qp->s_lock); 1085 if (tx->wqe) 1086 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS); 1087 else if (qp->ibqp.qp_type == IB_QPT_RC) { 1088 struct qib_ib_header *hdr; 1089 1090 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) 1091 hdr = &tx->align_buf->hdr; 1092 else { 1093 struct qib_ibdev *dev = to_idev(qp->ibqp.device); 1094 1095 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr; 1096 } 1097 qib_rc_send_complete(qp, hdr); 1098 } 1099 if (atomic_dec_and_test(&qp->s_dma_busy)) { 1100 if (qp->state == IB_QPS_RESET) 1101 wake_up(&qp->wait_dma); 1102 else if (qp->s_flags & QIB_S_WAIT_DMA) { 1103 qp->s_flags &= ~QIB_S_WAIT_DMA; 1104 qib_schedule_send(qp); 1105 } 1106 } 1107 spin_unlock(&qp->s_lock); 1108 1109 qib_put_txreq(tx); 1110} 1111 1112static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp) 1113{ 1114 unsigned long flags; 1115 int ret = 0; 1116 1117 spin_lock_irqsave(&qp->s_lock, flags); 1118 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { 1119 spin_lock(&dev->pending_lock); 1120 if (list_empty(&qp->iowait)) { 1121 if (list_empty(&dev->memwait)) 1122 mod_timer(&dev->mem_timer, jiffies + 1); 1123 qp->s_flags |= QIB_S_WAIT_KMEM; 1124 list_add_tail(&qp->iowait, &dev->memwait); 1125 } 1126 spin_unlock(&dev->pending_lock); 1127 qp->s_flags &= ~QIB_S_BUSY; 1128 ret = -EBUSY; 1129 } 1130 spin_unlock_irqrestore(&qp->s_lock, flags); 1131 1132 return ret; 1133} 1134 1135static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, 1136 u32 hdrwords, struct qib_sge_state *ss, u32 len, 1137 u32 plen, u32 dwords) 1138{ 1139 struct qib_ibdev *dev = to_idev(qp->ibqp.device); 1140 struct qib_devdata *dd = dd_from_dev(dev); 1141 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); 1142 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1143 struct qib_verbs_txreq *tx; 1144 struct qib_pio_header *phdr; 1145 u32 control; 1146 u32 ndesc; 1147 int ret; 1148 1149 tx = qp->s_tx; 1150 if (tx) { 1151 qp->s_tx = NULL; 1152 /* resend previously constructed packet */ 1153 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx); 1154 goto bail; 1155 } 1156 1157 tx = get_txreq(dev, qp); 1158 if (IS_ERR(tx)) 1159 goto bail_tx; 1160 1161 control = dd->f_setpbc_control(ppd, plen, qp->s_srate, 1162 be16_to_cpu(hdr->lrh[0]) >> 12); 1163 tx->qp = qp; 1164 atomic_inc(&qp->refcount); 1165 tx->wqe = qp->s_wqe; 1166 tx->mr = qp->s_rdma_mr; 1167 if (qp->s_rdma_mr) 1168 qp->s_rdma_mr = NULL; 1169 tx->txreq.callback = sdma_complete; 1170 if (dd->flags & QIB_HAS_SDMA_TIMEOUT) 1171 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST; 1172 else 1173 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ; 1174 if (plen + 1 > dd->piosize2kmax_dwords) 1175 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF; 1176 1177 if (len) { 1178 /* 1179 * Don't try to DMA if it takes more descriptors than 1180 * the queue holds. 1181 */ 1182 ndesc = qib_count_sge(ss, len); 1183 if (ndesc >= ppd->sdma_descq_cnt) 1184 ndesc = 0; 1185 } else 1186 ndesc = 1; 1187 if (ndesc) { 1188 phdr = &dev->pio_hdrs[tx->hdr_inx]; 1189 phdr->pbc[0] = cpu_to_le32(plen); 1190 phdr->pbc[1] = cpu_to_le32(control); 1191 memcpy(&phdr->hdr, hdr, hdrwords << 2); 1192 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC; 1193 tx->txreq.sg_count = ndesc; 1194 tx->txreq.addr = dev->pio_hdrs_phys + 1195 tx->hdr_inx * sizeof(struct qib_pio_header); 1196 tx->hdr_dwords = hdrwords + 2; /* add PBC length */ 1197 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx); 1198 goto bail; 1199 } 1200 1201 /* Allocate a buffer and copy the header and payload to it. */ 1202 tx->hdr_dwords = plen + 1; 1203 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC); 1204 if (!phdr) 1205 goto err_tx; 1206 phdr->pbc[0] = cpu_to_le32(plen); 1207 phdr->pbc[1] = cpu_to_le32(control); 1208 memcpy(&phdr->hdr, hdr, hdrwords << 2); 1209 qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len); 1210 1211 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr, 1212 tx->hdr_dwords << 2, DMA_TO_DEVICE); 1213 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr)) 1214 goto map_err; 1215 tx->align_buf = phdr; 1216 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF; 1217 tx->txreq.sg_count = 1; 1218 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx); 1219 goto unaligned; 1220 1221map_err: 1222 kfree(phdr); 1223err_tx: 1224 qib_put_txreq(tx); 1225 ret = wait_kmem(dev, qp); 1226unaligned: 1227 ibp->n_unaligned++; 1228bail: 1229 return ret; 1230bail_tx: 1231 ret = PTR_ERR(tx); 1232 goto bail; 1233} 1234 1235/* 1236 * If we are now in the error state, return zero to flush the 1237 * send work request. 1238 */ 1239static int no_bufs_available(struct qib_qp *qp) 1240{ 1241 struct qib_ibdev *dev = to_idev(qp->ibqp.device); 1242 struct qib_devdata *dd; 1243 unsigned long flags; 1244 int ret = 0; 1245 1246 /* 1247 * Note that as soon as want_buffer() is called and 1248 * possibly before it returns, qib_ib_piobufavail() 1249 * could be called. Therefore, put QP on the I/O wait list before 1250 * enabling the PIO avail interrupt. 1251 */ 1252 spin_lock_irqsave(&qp->s_lock, flags); 1253 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { 1254 spin_lock(&dev->pending_lock); 1255 if (list_empty(&qp->iowait)) { 1256 dev->n_piowait++; 1257 qp->s_flags |= QIB_S_WAIT_PIO; 1258 list_add_tail(&qp->iowait, &dev->piowait); 1259 dd = dd_from_dev(dev); 1260 dd->f_wantpiobuf_intr(dd, 1); 1261 } 1262 spin_unlock(&dev->pending_lock); 1263 qp->s_flags &= ~QIB_S_BUSY; 1264 ret = -EBUSY; 1265 } 1266 spin_unlock_irqrestore(&qp->s_lock, flags); 1267 return ret; 1268} 1269 1270static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, 1271 u32 hdrwords, struct qib_sge_state *ss, u32 len, 1272 u32 plen, u32 dwords) 1273{ 1274 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); 1275 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1; 1276 u32 *hdr = (u32 *) ibhdr; 1277 u32 __iomem *piobuf_orig; 1278 u32 __iomem *piobuf; 1279 u64 pbc; 1280 unsigned long flags; 1281 unsigned flush_wc; 1282 u32 control; 1283 u32 pbufn; 1284 1285 control = dd->f_setpbc_control(ppd, plen, qp->s_srate, 1286 be16_to_cpu(ibhdr->lrh[0]) >> 12); 1287 pbc = ((u64) control << 32) | plen; 1288 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn); 1289 if (unlikely(piobuf == NULL)) 1290 return no_bufs_available(qp); 1291 1292 /* 1293 * Write the pbc. 1294 * We have to flush after the PBC for correctness on some cpus 1295 * or WC buffer can be written out of order. 1296 */ 1297 writeq(pbc, piobuf); 1298 piobuf_orig = piobuf; 1299 piobuf += 2; 1300 1301 flush_wc = dd->flags & QIB_PIO_FLUSH_WC; 1302 if (len == 0) { 1303 /* 1304 * If there is just the header portion, must flush before 1305 * writing last word of header for correctness, and after 1306 * the last header word (trigger word). 1307 */ 1308 if (flush_wc) { 1309 qib_flush_wc(); 1310 qib_pio_copy(piobuf, hdr, hdrwords - 1); 1311 qib_flush_wc(); 1312 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); 1313 qib_flush_wc(); 1314 } else 1315 qib_pio_copy(piobuf, hdr, hdrwords); 1316 goto done; 1317 } 1318 1319 if (flush_wc) 1320 qib_flush_wc(); 1321 qib_pio_copy(piobuf, hdr, hdrwords); 1322 piobuf += hdrwords; 1323 1324 /* The common case is aligned and contained in one segment. */ 1325 if (likely(ss->num_sge == 1 && len <= ss->sge.length && 1326 !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { 1327 u32 *addr = (u32 *) ss->sge.vaddr; 1328 1329 /* Update address before sending packet. */ 1330 update_sge(ss, len); 1331 if (flush_wc) { 1332 qib_pio_copy(piobuf, addr, dwords - 1); 1333 /* must flush early everything before trigger word */ 1334 qib_flush_wc(); 1335 __raw_writel(addr[dwords - 1], piobuf + dwords - 1); 1336 /* be sure trigger word is written */ 1337 qib_flush_wc(); 1338 } else 1339 qib_pio_copy(piobuf, addr, dwords); 1340 goto done; 1341 } 1342 copy_io(piobuf, ss, len, flush_wc); 1343done: 1344 if (dd->flags & QIB_USE_SPCL_TRIG) { 1345 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; 1346 1347 qib_flush_wc(); 1348 __raw_writel(0xaebecede, piobuf_orig + spcl_off); 1349 } 1350 qib_sendbuf_done(dd, pbufn); 1351 if (qp->s_rdma_mr) { 1352 qib_put_mr(qp->s_rdma_mr); 1353 qp->s_rdma_mr = NULL; 1354 } 1355 if (qp->s_wqe) { 1356 spin_lock_irqsave(&qp->s_lock, flags); 1357 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); 1358 spin_unlock_irqrestore(&qp->s_lock, flags); 1359 } else if (qp->ibqp.qp_type == IB_QPT_RC) { 1360 spin_lock_irqsave(&qp->s_lock, flags); 1361 qib_rc_send_complete(qp, ibhdr); 1362 spin_unlock_irqrestore(&qp->s_lock, flags); 1363 } 1364 return 0; 1365} 1366 1367/** 1368 * qib_verbs_send - send a packet 1369 * @qp: the QP to send on 1370 * @hdr: the packet header 1371 * @hdrwords: the number of 32-bit words in the header 1372 * @ss: the SGE to send 1373 * @len: the length of the packet in bytes 1374 * 1375 * Return zero if packet is sent or queued OK. 1376 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise. 1377 */ 1378int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, 1379 u32 hdrwords, struct qib_sge_state *ss, u32 len) 1380{ 1381 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); 1382 u32 plen; 1383 int ret; 1384 u32 dwords = (len + 3) >> 2; 1385 1386 /* 1387 * Calculate the send buffer trigger address. 1388 * The +1 counts for the pbc control dword following the pbc length. 1389 */ 1390 plen = hdrwords + dwords + 1; 1391 1392 /* 1393 * VL15 packets (IB_QPT_SMI) will always use PIO, so we 1394 * can defer SDMA restart until link goes ACTIVE without 1395 * worrying about just how we got there. 1396 */ 1397 if (qp->ibqp.qp_type == IB_QPT_SMI || 1398 !(dd->flags & QIB_HAS_SEND_DMA)) 1399 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len, 1400 plen, dwords); 1401 else 1402 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len, 1403 plen, dwords); 1404 1405 return ret; 1406} 1407 1408int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords, 1409 u64 *rwords, u64 *spkts, u64 *rpkts, 1410 u64 *xmit_wait) 1411{ 1412 int ret; 1413 struct qib_devdata *dd = ppd->dd; 1414 1415 if (!(dd->flags & QIB_PRESENT)) { 1416 /* no hardware, freeze, etc. */ 1417 ret = -EINVAL; 1418 goto bail; 1419 } 1420 *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND); 1421 *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV); 1422 *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND); 1423 *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV); 1424 *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL); 1425 1426 ret = 0; 1427 1428bail: 1429 return ret; 1430} 1431 1432/** 1433 * qib_get_counters - get various chip counters 1434 * @dd: the qlogic_ib device 1435 * @cntrs: counters are placed here 1436 * 1437 * Return the counters needed by recv_pma_get_portcounters(). 1438 */ 1439int qib_get_counters(struct qib_pportdata *ppd, 1440 struct qib_verbs_counters *cntrs) 1441{ 1442 int ret; 1443 1444 if (!(ppd->dd->flags & QIB_PRESENT)) { 1445 /* no hardware, freeze, etc. */ 1446 ret = -EINVAL; 1447 goto bail; 1448 } 1449 cntrs->symbol_error_counter = 1450 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR); 1451 cntrs->link_error_recovery_counter = 1452 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV); 1453 /* 1454 * The link downed counter counts when the other side downs the 1455 * connection. We add in the number of times we downed the link 1456 * due to local link integrity errors to compensate. 1457 */ 1458 cntrs->link_downed_counter = 1459 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN); 1460 cntrs->port_rcv_errors = 1461 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) + 1462 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) + 1463 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) + 1464 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) + 1465 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) + 1466 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) + 1467 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) + 1468 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) + 1469 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT); 1470 cntrs->port_rcv_errors += 1471 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR); 1472 cntrs->port_rcv_errors += 1473 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR); 1474 cntrs->port_rcv_remphys_errors = 1475 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP); 1476 cntrs->port_xmit_discards = 1477 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL); 1478 cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd, 1479 QIBPORTCNTR_WORDSEND); 1480 cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd, 1481 QIBPORTCNTR_WORDRCV); 1482 cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd, 1483 QIBPORTCNTR_PKTSEND); 1484 cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd, 1485 QIBPORTCNTR_PKTRCV); 1486 cntrs->local_link_integrity_errors = 1487 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI); 1488 cntrs->excessive_buffer_overrun_errors = 1489 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL); 1490 cntrs->vl15_dropped = 1491 ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP); 1492 1493 ret = 0; 1494 1495bail: 1496 return ret; 1497} 1498 1499/** 1500 * qib_ib_piobufavail - callback when a PIO buffer is available 1501 * @dd: the device pointer 1502 * 1503 * This is called from qib_intr() at interrupt level when a PIO buffer is 1504 * available after qib_verbs_send() returned an error that no buffers were 1505 * available. Disable the interrupt if there are no more QPs waiting. 1506 */ 1507void qib_ib_piobufavail(struct qib_devdata *dd) 1508{ 1509 struct qib_ibdev *dev = &dd->verbs_dev; 1510 struct list_head *list; 1511 struct qib_qp *qps[5]; 1512 struct qib_qp *qp; 1513 unsigned long flags; 1514 unsigned i, n; 1515 1516 list = &dev->piowait; 1517 n = 0; 1518 1519 /* 1520 * Note: checking that the piowait list is empty and clearing 1521 * the buffer available interrupt needs to be atomic or we 1522 * could end up with QPs on the wait list with the interrupt 1523 * disabled. 1524 */ 1525 spin_lock_irqsave(&dev->pending_lock, flags); 1526 while (!list_empty(list)) { 1527 if (n == ARRAY_SIZE(qps)) 1528 goto full; 1529 qp = list_entry(list->next, struct qib_qp, iowait); 1530 list_del_init(&qp->iowait); 1531 atomic_inc(&qp->refcount); 1532 qps[n++] = qp; 1533 } 1534 dd->f_wantpiobuf_intr(dd, 0); 1535full: 1536 spin_unlock_irqrestore(&dev->pending_lock, flags); 1537 1538 for (i = 0; i < n; i++) { 1539 qp = qps[i]; 1540 1541 spin_lock_irqsave(&qp->s_lock, flags); 1542 if (qp->s_flags & QIB_S_WAIT_PIO) { 1543 qp->s_flags &= ~QIB_S_WAIT_PIO; 1544 qib_schedule_send(qp); 1545 } 1546 spin_unlock_irqrestore(&qp->s_lock, flags); 1547 1548 /* Notify qib_destroy_qp() if it is waiting. */ 1549 if (atomic_dec_and_test(&qp->refcount)) 1550 wake_up(&qp->wait); 1551 } 1552} 1553 1554static int qib_query_device(struct ib_device *ibdev, 1555 struct ib_device_attr *props) 1556{ 1557 struct qib_devdata *dd = dd_from_ibdev(ibdev); 1558 struct qib_ibdev *dev = to_idev(ibdev); 1559 1560 memset(props, 0, sizeof(*props)); 1561 1562 props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | 1563 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | 1564 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | 1565 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE; 1566 props->page_size_cap = PAGE_SIZE; 1567 props->vendor_id = 1568 QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3; 1569 props->vendor_part_id = dd->deviceid; 1570 props->hw_ver = dd->minrev; 1571 props->sys_image_guid = ib_qib_sys_image_guid; 1572 props->max_mr_size = ~0ULL; 1573 props->max_qp = ib_qib_max_qps; 1574 props->max_qp_wr = ib_qib_max_qp_wrs; 1575 props->max_sge = ib_qib_max_sges; 1576 props->max_cq = ib_qib_max_cqs; 1577 props->max_ah = ib_qib_max_ahs; 1578 props->max_cqe = ib_qib_max_cqes; 1579 props->max_mr = dev->lk_table.max; 1580 props->max_fmr = dev->lk_table.max; 1581 props->max_map_per_fmr = 32767; 1582 props->max_pd = ib_qib_max_pds; 1583 props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; 1584 props->max_qp_init_rd_atom = 255; 1585 /* props->max_res_rd_atom */ 1586 props->max_srq = ib_qib_max_srqs; 1587 props->max_srq_wr = ib_qib_max_srq_wrs; 1588 props->max_srq_sge = ib_qib_max_srq_sges; 1589 /* props->local_ca_ack_delay */ 1590 props->atomic_cap = IB_ATOMIC_GLOB; 1591 props->max_pkeys = qib_get_npkeys(dd); 1592 props->max_mcast_grp = ib_qib_max_mcast_grps; 1593 props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached; 1594 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 1595 props->max_mcast_grp; 1596 1597 return 0; 1598} 1599 1600static int qib_query_port(struct ib_device *ibdev, u8 port, 1601 struct ib_port_attr *props) 1602{ 1603 struct qib_devdata *dd = dd_from_ibdev(ibdev); 1604 struct qib_ibport *ibp = to_iport(ibdev, port); 1605 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1606 enum ib_mtu mtu; 1607 u16 lid = ppd->lid; 1608 1609 memset(props, 0, sizeof(*props)); 1610 props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE); 1611 props->lmc = ppd->lmc; 1612 props->sm_lid = ibp->sm_lid; 1613 props->sm_sl = ibp->sm_sl; 1614 props->state = dd->f_iblink_state(ppd->lastibcstat); 1615 props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat); 1616 props->port_cap_flags = ibp->port_cap_flags; 1617 props->gid_tbl_len = QIB_GUIDS_PER_PORT; 1618 props->max_msg_sz = 0x80000000; 1619 props->pkey_tbl_len = qib_get_npkeys(dd); 1620 props->bad_pkey_cntr = ibp->pkey_violations; 1621 props->qkey_viol_cntr = ibp->qkey_violations; 1622 props->active_width = ppd->link_width_active; 1623 /* See rate_show() */ 1624 props->active_speed = ppd->link_speed_active; 1625 props->max_vl_num = qib_num_vls(ppd->vls_supported); 1626 props->init_type_reply = 0; 1627 1628 props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096; 1629 switch (ppd->ibmtu) { 1630 case 4096: 1631 mtu = IB_MTU_4096; 1632 break; 1633 case 2048: 1634 mtu = IB_MTU_2048; 1635 break; 1636 case 1024: 1637 mtu = IB_MTU_1024; 1638 break; 1639 case 512: 1640 mtu = IB_MTU_512; 1641 break; 1642 case 256: 1643 mtu = IB_MTU_256; 1644 break; 1645 default: 1646 mtu = IB_MTU_2048; 1647 } 1648 props->active_mtu = mtu; 1649 props->subnet_timeout = ibp->subnet_timeout; 1650 1651 return 0; 1652} 1653 1654static int qib_modify_device(struct ib_device *device, 1655 int device_modify_mask, 1656 struct ib_device_modify *device_modify) 1657{ 1658 struct qib_devdata *dd = dd_from_ibdev(device); 1659 unsigned i; 1660 int ret; 1661 1662 if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID | 1663 IB_DEVICE_MODIFY_NODE_DESC)) { 1664 ret = -EOPNOTSUPP; 1665 goto bail; 1666 } 1667 1668 if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) { 1669 memcpy(device->node_desc, device_modify->node_desc, 64); 1670 for (i = 0; i < dd->num_pports; i++) { 1671 struct qib_ibport *ibp = &dd->pport[i].ibport_data; 1672 1673 qib_node_desc_chg(ibp); 1674 } 1675 } 1676 1677 if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) { 1678 ib_qib_sys_image_guid = 1679 cpu_to_be64(device_modify->sys_image_guid); 1680 for (i = 0; i < dd->num_pports; i++) { 1681 struct qib_ibport *ibp = &dd->pport[i].ibport_data; 1682 1683 qib_sys_guid_chg(ibp); 1684 } 1685 } 1686 1687 ret = 0; 1688 1689bail: 1690 return ret; 1691} 1692 1693static int qib_modify_port(struct ib_device *ibdev, u8 port, 1694 int port_modify_mask, struct ib_port_modify *props) 1695{ 1696 struct qib_ibport *ibp = to_iport(ibdev, port); 1697 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1698 1699 ibp->port_cap_flags |= props->set_port_cap_mask; 1700 ibp->port_cap_flags &= ~props->clr_port_cap_mask; 1701 if (props->set_port_cap_mask || props->clr_port_cap_mask) 1702 qib_cap_mask_chg(ibp); 1703 if (port_modify_mask & IB_PORT_SHUTDOWN) 1704 qib_set_linkstate(ppd, QIB_IB_LINKDOWN); 1705 if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) 1706 ibp->qkey_violations = 0; 1707 return 0; 1708} 1709 1710static int qib_query_gid(struct ib_device *ibdev, u8 port, 1711 int index, union ib_gid *gid) 1712{ 1713 struct qib_devdata *dd = dd_from_ibdev(ibdev); 1714 int ret = 0; 1715 1716 if (!port || port > dd->num_pports) 1717 ret = -EINVAL; 1718 else { 1719 struct qib_ibport *ibp = to_iport(ibdev, port); 1720 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1721 1722 gid->global.subnet_prefix = ibp->gid_prefix; 1723 if (index == 0) 1724 gid->global.interface_id = ppd->guid; 1725 else if (index < QIB_GUIDS_PER_PORT) 1726 gid->global.interface_id = ibp->guids[index - 1]; 1727 else 1728 ret = -EINVAL; 1729 } 1730 1731 return ret; 1732} 1733 1734static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev, 1735 struct ib_ucontext *context, 1736 struct ib_udata *udata) 1737{ 1738 struct qib_ibdev *dev = to_idev(ibdev); 1739 struct qib_pd *pd; 1740 struct ib_pd *ret; 1741 1742 /* 1743 * This is actually totally arbitrary. Some correctness tests 1744 * assume there's a maximum number of PDs that can be allocated. 1745 * We don't actually have this limit, but we fail the test if 1746 * we allow allocations of more than we report for this value. 1747 */ 1748 1749 pd = kmalloc(sizeof(*pd), GFP_KERNEL); 1750 if (!pd) { 1751 ret = ERR_PTR(-ENOMEM); 1752 goto bail; 1753 } 1754 1755 spin_lock(&dev->n_pds_lock); 1756 if (dev->n_pds_allocated == ib_qib_max_pds) { 1757 spin_unlock(&dev->n_pds_lock); 1758 kfree(pd); 1759 ret = ERR_PTR(-ENOMEM); 1760 goto bail; 1761 } 1762 1763 dev->n_pds_allocated++; 1764 spin_unlock(&dev->n_pds_lock); 1765 1766 /* ib_alloc_pd() will initialize pd->ibpd. */ 1767 pd->user = udata != NULL; 1768 1769 ret = &pd->ibpd; 1770 1771bail: 1772 return ret; 1773} 1774 1775static int qib_dealloc_pd(struct ib_pd *ibpd) 1776{ 1777 struct qib_pd *pd = to_ipd(ibpd); 1778 struct qib_ibdev *dev = to_idev(ibpd->device); 1779 1780 spin_lock(&dev->n_pds_lock); 1781 dev->n_pds_allocated--; 1782 spin_unlock(&dev->n_pds_lock); 1783 1784 kfree(pd); 1785 1786 return 0; 1787} 1788 1789int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr) 1790{ 1791 /* A multicast address requires a GRH (see ch. 8.4.1). */ 1792 if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE && 1793 ah_attr->dlid != QIB_PERMISSIVE_LID && 1794 !(ah_attr->ah_flags & IB_AH_GRH)) 1795 goto bail; 1796 if ((ah_attr->ah_flags & IB_AH_GRH) && 1797 ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT) 1798 goto bail; 1799 if (ah_attr->dlid == 0) 1800 goto bail; 1801 if (ah_attr->port_num < 1 || 1802 ah_attr->port_num > ibdev->phys_port_cnt) 1803 goto bail; 1804 if (ah_attr->static_rate != IB_RATE_PORT_CURRENT && 1805 ib_rate_to_mult(ah_attr->static_rate) < 0) 1806 goto bail; 1807 if (ah_attr->sl > 15) 1808 goto bail; 1809 return 0; 1810bail: 1811 return -EINVAL; 1812} 1813 1814/** 1815 * qib_create_ah - create an address handle 1816 * @pd: the protection domain 1817 * @ah_attr: the attributes of the AH 1818 * 1819 * This may be called from interrupt context. 1820 */ 1821static struct ib_ah *qib_create_ah(struct ib_pd *pd, 1822 struct ib_ah_attr *ah_attr) 1823{ 1824 struct qib_ah *ah; 1825 struct ib_ah *ret; 1826 struct qib_ibdev *dev = to_idev(pd->device); 1827 unsigned long flags; 1828 1829 if (qib_check_ah(pd->device, ah_attr)) { 1830 ret = ERR_PTR(-EINVAL); 1831 goto bail; 1832 } 1833 1834 ah = kmalloc(sizeof(*ah), GFP_ATOMIC); 1835 if (!ah) { 1836 ret = ERR_PTR(-ENOMEM); 1837 goto bail; 1838 } 1839 1840 spin_lock_irqsave(&dev->n_ahs_lock, flags); 1841 if (dev->n_ahs_allocated == ib_qib_max_ahs) { 1842 spin_unlock_irqrestore(&dev->n_ahs_lock, flags); 1843 kfree(ah); 1844 ret = ERR_PTR(-ENOMEM); 1845 goto bail; 1846 } 1847 1848 dev->n_ahs_allocated++; 1849 spin_unlock_irqrestore(&dev->n_ahs_lock, flags); 1850 1851 /* ib_create_ah() will initialize ah->ibah. */ 1852 ah->attr = *ah_attr; 1853 atomic_set(&ah->refcount, 0); 1854 1855 ret = &ah->ibah; 1856 1857bail: 1858 return ret; 1859} 1860 1861struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid) 1862{ 1863 struct ib_ah_attr attr; 1864 struct ib_ah *ah = ERR_PTR(-EINVAL); 1865 struct qib_qp *qp0; 1866 1867 memset(&attr, 0, sizeof(attr)); 1868 attr.dlid = dlid; 1869 attr.port_num = ppd_from_ibp(ibp)->port; 1870 rcu_read_lock(); 1871 qp0 = rcu_dereference(ibp->qp0); 1872 if (qp0) 1873 ah = ib_create_ah(qp0->ibqp.pd, &attr); 1874 rcu_read_unlock(); 1875 return ah; 1876} 1877 1878/** 1879 * qib_destroy_ah - destroy an address handle 1880 * @ibah: the AH to destroy 1881 * 1882 * This may be called from interrupt context. 1883 */ 1884static int qib_destroy_ah(struct ib_ah *ibah) 1885{ 1886 struct qib_ibdev *dev = to_idev(ibah->device); 1887 struct qib_ah *ah = to_iah(ibah); 1888 unsigned long flags; 1889 1890 if (atomic_read(&ah->refcount) != 0) 1891 return -EBUSY; 1892 1893 spin_lock_irqsave(&dev->n_ahs_lock, flags); 1894 dev->n_ahs_allocated--; 1895 spin_unlock_irqrestore(&dev->n_ahs_lock, flags); 1896 1897 kfree(ah); 1898 1899 return 0; 1900} 1901 1902static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) 1903{ 1904 struct qib_ah *ah = to_iah(ibah); 1905 1906 if (qib_check_ah(ibah->device, ah_attr)) 1907 return -EINVAL; 1908 1909 ah->attr = *ah_attr; 1910 1911 return 0; 1912} 1913 1914static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) 1915{ 1916 struct qib_ah *ah = to_iah(ibah); 1917 1918 *ah_attr = ah->attr; 1919 1920 return 0; 1921} 1922 1923/** 1924 * qib_get_npkeys - return the size of the PKEY table for context 0 1925 * @dd: the qlogic_ib device 1926 */ 1927unsigned qib_get_npkeys(struct qib_devdata *dd) 1928{ 1929 return ARRAY_SIZE(dd->rcd[0]->pkeys); 1930} 1931 1932/* 1933 * Return the indexed PKEY from the port PKEY table. 1934 * No need to validate rcd[ctxt]; the port is setup if we are here. 1935 */ 1936unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index) 1937{ 1938 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1939 struct qib_devdata *dd = ppd->dd; 1940 unsigned ctxt = ppd->hw_pidx; 1941 unsigned ret; 1942 1943 /* dd->rcd null if mini_init or some init failures */ 1944 if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys)) 1945 ret = 0; 1946 else 1947 ret = dd->rcd[ctxt]->pkeys[index]; 1948 1949 return ret; 1950} 1951 1952static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 1953 u16 *pkey) 1954{ 1955 struct qib_devdata *dd = dd_from_ibdev(ibdev); 1956 int ret; 1957 1958 if (index >= qib_get_npkeys(dd)) { 1959 ret = -EINVAL; 1960 goto bail; 1961 } 1962 1963 *pkey = qib_get_pkey(to_iport(ibdev, port), index); 1964 ret = 0; 1965 1966bail: 1967 return ret; 1968} 1969 1970/** 1971 * qib_alloc_ucontext - allocate a ucontest 1972 * @ibdev: the infiniband device 1973 * @udata: not used by the QLogic_IB driver 1974 */ 1975 1976static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev, 1977 struct ib_udata *udata) 1978{ 1979 struct qib_ucontext *context; 1980 struct ib_ucontext *ret; 1981 1982 context = kmalloc(sizeof(*context), GFP_KERNEL); 1983 if (!context) { 1984 ret = ERR_PTR(-ENOMEM); 1985 goto bail; 1986 } 1987 1988 ret = &context->ibucontext; 1989 1990bail: 1991 return ret; 1992} 1993 1994static int qib_dealloc_ucontext(struct ib_ucontext *context) 1995{ 1996 kfree(to_iucontext(context)); 1997 return 0; 1998} 1999 2000static void init_ibport(struct qib_pportdata *ppd) 2001{ 2002 struct qib_verbs_counters cntrs; 2003 struct qib_ibport *ibp = &ppd->ibport_data; 2004 2005 spin_lock_init(&ibp->lock); 2006 /* Set the prefix to the default value (see ch. 4.1.1) */ 2007 ibp->gid_prefix = IB_DEFAULT_GID_PREFIX; 2008 ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE); 2009 ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP | 2010 IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP | 2011 IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP | 2012 IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP | 2013 IB_PORT_OTHER_LOCAL_CHANGES_SUP; 2014 if (ppd->dd->flags & QIB_HAS_LINK_LATENCY) 2015 ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP; 2016 ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; 2017 ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; 2018 ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; 2019 ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; 2020 ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; 2021 2022 /* Snapshot current HW counters to "clear" them. */ 2023 qib_get_counters(ppd, &cntrs); 2024 ibp->z_symbol_error_counter = cntrs.symbol_error_counter; 2025 ibp->z_link_error_recovery_counter = 2026 cntrs.link_error_recovery_counter; 2027 ibp->z_link_downed_counter = cntrs.link_downed_counter; 2028 ibp->z_port_rcv_errors = cntrs.port_rcv_errors; 2029 ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; 2030 ibp->z_port_xmit_discards = cntrs.port_xmit_discards; 2031 ibp->z_port_xmit_data = cntrs.port_xmit_data; 2032 ibp->z_port_rcv_data = cntrs.port_rcv_data; 2033 ibp->z_port_xmit_packets = cntrs.port_xmit_packets; 2034 ibp->z_port_rcv_packets = cntrs.port_rcv_packets; 2035 ibp->z_local_link_integrity_errors = 2036 cntrs.local_link_integrity_errors; 2037 ibp->z_excessive_buffer_overrun_errors = 2038 cntrs.excessive_buffer_overrun_errors; 2039 ibp->z_vl15_dropped = cntrs.vl15_dropped; 2040 RCU_INIT_POINTER(ibp->qp0, NULL); 2041 RCU_INIT_POINTER(ibp->qp1, NULL); 2042} 2043 2044/** 2045 * qib_register_ib_device - register our device with the infiniband core 2046 * @dd: the device data structure 2047 * Return the allocated qib_ibdev pointer or NULL on error. 2048 */ 2049int qib_register_ib_device(struct qib_devdata *dd) 2050{ 2051 struct qib_ibdev *dev = &dd->verbs_dev; 2052 struct ib_device *ibdev = &dev->ibdev; 2053 struct qib_pportdata *ppd = dd->pport; 2054 unsigned i, lk_tab_size; 2055 int ret; 2056 2057 dev->qp_table_size = ib_qib_qp_table_size; 2058 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); 2059 dev->qp_table = kmalloc_array( 2060 dev->qp_table_size, 2061 sizeof(*dev->qp_table), 2062 GFP_KERNEL); 2063 if (!dev->qp_table) { 2064 ret = -ENOMEM; 2065 goto err_qpt; 2066 } 2067 for (i = 0; i < dev->qp_table_size; i++) 2068 RCU_INIT_POINTER(dev->qp_table[i], NULL); 2069 2070 for (i = 0; i < dd->num_pports; i++) 2071 init_ibport(ppd + i); 2072 2073 /* Only need to initialize non-zero fields. */ 2074 spin_lock_init(&dev->qpt_lock); 2075 spin_lock_init(&dev->n_pds_lock); 2076 spin_lock_init(&dev->n_ahs_lock); 2077 spin_lock_init(&dev->n_cqs_lock); 2078 spin_lock_init(&dev->n_qps_lock); 2079 spin_lock_init(&dev->n_srqs_lock); 2080 spin_lock_init(&dev->n_mcast_grps_lock); 2081 init_timer(&dev->mem_timer); 2082 dev->mem_timer.function = mem_timer; 2083 dev->mem_timer.data = (unsigned long) dev; 2084 2085 qib_init_qpn_table(dd, &dev->qpn_table); 2086 2087 /* 2088 * The top ib_qib_lkey_table_size bits are used to index the 2089 * table. The lower 8 bits can be owned by the user (copied from 2090 * the LKEY). The remaining bits act as a generation number or tag. 2091 */ 2092 spin_lock_init(&dev->lk_table.lock); 2093 /* insure generation is at least 4 bits see keys.c */ 2094 if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) { 2095 qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n", 2096 ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS); 2097 ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS; 2098 } 2099 dev->lk_table.max = 1 << ib_qib_lkey_table_size; 2100 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); 2101 dev->lk_table.table = (struct qib_mregion __rcu **) 2102 vmalloc(lk_tab_size); 2103 if (dev->lk_table.table == NULL) { 2104 ret = -ENOMEM; 2105 goto err_lk; 2106 } 2107 RCU_INIT_POINTER(dev->dma_mr, NULL); 2108 for (i = 0; i < dev->lk_table.max; i++) 2109 RCU_INIT_POINTER(dev->lk_table.table[i], NULL); 2110 INIT_LIST_HEAD(&dev->pending_mmaps); 2111 spin_lock_init(&dev->pending_lock); 2112 dev->mmap_offset = PAGE_SIZE; 2113 spin_lock_init(&dev->mmap_offset_lock); 2114 INIT_LIST_HEAD(&dev->piowait); 2115 INIT_LIST_HEAD(&dev->dmawait); 2116 INIT_LIST_HEAD(&dev->txwait); 2117 INIT_LIST_HEAD(&dev->memwait); 2118 INIT_LIST_HEAD(&dev->txreq_free); 2119 2120 if (ppd->sdma_descq_cnt) { 2121 dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev, 2122 ppd->sdma_descq_cnt * 2123 sizeof(struct qib_pio_header), 2124 &dev->pio_hdrs_phys, 2125 GFP_KERNEL); 2126 if (!dev->pio_hdrs) { 2127 ret = -ENOMEM; 2128 goto err_hdrs; 2129 } 2130 } 2131 2132 for (i = 0; i < ppd->sdma_descq_cnt; i++) { 2133 struct qib_verbs_txreq *tx; 2134 2135 tx = kzalloc(sizeof(*tx), GFP_KERNEL); 2136 if (!tx) { 2137 ret = -ENOMEM; 2138 goto err_tx; 2139 } 2140 tx->hdr_inx = i; 2141 list_add(&tx->txreq.list, &dev->txreq_free); 2142 } 2143 2144 /* 2145 * The system image GUID is supposed to be the same for all 2146 * IB HCAs in a single system but since there can be other 2147 * device types in the system, we can't be sure this is unique. 2148 */ 2149 if (!ib_qib_sys_image_guid) 2150 ib_qib_sys_image_guid = ppd->guid; 2151 2152 strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX); 2153 ibdev->owner = THIS_MODULE; 2154 ibdev->node_guid = ppd->guid; 2155 ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION; 2156 ibdev->uverbs_cmd_mask = 2157 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 2158 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 2159 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | 2160 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 2161 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 2162 (1ull << IB_USER_VERBS_CMD_CREATE_AH) | 2163 (1ull << IB_USER_VERBS_CMD_MODIFY_AH) | 2164 (1ull << IB_USER_VERBS_CMD_QUERY_AH) | 2165 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | 2166 (1ull << IB_USER_VERBS_CMD_REG_MR) | 2167 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 2168 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 2169 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 2170 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | 2171 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 2172 (1ull << IB_USER_VERBS_CMD_POLL_CQ) | 2173 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | 2174 (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 2175 (1ull << IB_USER_VERBS_CMD_QUERY_QP) | 2176 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 2177 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 2178 (1ull << IB_USER_VERBS_CMD_POST_SEND) | 2179 (1ull << IB_USER_VERBS_CMD_POST_RECV) | 2180 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 2181 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 2182 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | 2183 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | 2184 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | 2185 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 2186 (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); 2187 ibdev->node_type = RDMA_NODE_IB_CA; 2188 ibdev->phys_port_cnt = dd->num_pports; 2189 ibdev->num_comp_vectors = 1; 2190 ibdev->dma_device = &dd->pcidev->dev; 2191 ibdev->query_device = qib_query_device; 2192 ibdev->modify_device = qib_modify_device; 2193 ibdev->query_port = qib_query_port; 2194 ibdev->modify_port = qib_modify_port; 2195 ibdev->query_pkey = qib_query_pkey; 2196 ibdev->query_gid = qib_query_gid; 2197 ibdev->alloc_ucontext = qib_alloc_ucontext; 2198 ibdev->dealloc_ucontext = qib_dealloc_ucontext; 2199 ibdev->alloc_pd = qib_alloc_pd; 2200 ibdev->dealloc_pd = qib_dealloc_pd; 2201 ibdev->create_ah = qib_create_ah; 2202 ibdev->destroy_ah = qib_destroy_ah; 2203 ibdev->modify_ah = qib_modify_ah; 2204 ibdev->query_ah = qib_query_ah; 2205 ibdev->create_srq = qib_create_srq; 2206 ibdev->modify_srq = qib_modify_srq; 2207 ibdev->query_srq = qib_query_srq; 2208 ibdev->destroy_srq = qib_destroy_srq; 2209 ibdev->create_qp = qib_create_qp; 2210 ibdev->modify_qp = qib_modify_qp; 2211 ibdev->query_qp = qib_query_qp; 2212 ibdev->destroy_qp = qib_destroy_qp; 2213 ibdev->post_send = qib_post_send; 2214 ibdev->post_recv = qib_post_receive; 2215 ibdev->post_srq_recv = qib_post_srq_receive; 2216 ibdev->create_cq = qib_create_cq; 2217 ibdev->destroy_cq = qib_destroy_cq; 2218 ibdev->resize_cq = qib_resize_cq; 2219 ibdev->poll_cq = qib_poll_cq; 2220 ibdev->req_notify_cq = qib_req_notify_cq; 2221 ibdev->get_dma_mr = qib_get_dma_mr; 2222 ibdev->reg_phys_mr = qib_reg_phys_mr; 2223 ibdev->reg_user_mr = qib_reg_user_mr; 2224 ibdev->dereg_mr = qib_dereg_mr; 2225 ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr; 2226 ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list; 2227 ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list; 2228 ibdev->alloc_fmr = qib_alloc_fmr; 2229 ibdev->map_phys_fmr = qib_map_phys_fmr; 2230 ibdev->unmap_fmr = qib_unmap_fmr; 2231 ibdev->dealloc_fmr = qib_dealloc_fmr; 2232 ibdev->attach_mcast = qib_multicast_attach; 2233 ibdev->detach_mcast = qib_multicast_detach; 2234 ibdev->process_mad = qib_process_mad; 2235 ibdev->mmap = qib_mmap; 2236 ibdev->dma_ops = &qib_dma_mapping_ops; 2237 2238 snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), 2239 "Intel Infiniband HCA %s", init_utsname()->nodename); 2240 2241 ret = ib_register_device(ibdev, qib_create_port_files); 2242 if (ret) 2243 goto err_reg; 2244 2245 ret = qib_create_agents(dev); 2246 if (ret) 2247 goto err_agents; 2248 2249 ret = qib_verbs_register_sysfs(dd); 2250 if (ret) 2251 goto err_class; 2252 2253 goto bail; 2254 2255err_class: 2256 qib_free_agents(dev); 2257err_agents: 2258 ib_unregister_device(ibdev); 2259err_reg: 2260err_tx: 2261 while (!list_empty(&dev->txreq_free)) { 2262 struct list_head *l = dev->txreq_free.next; 2263 struct qib_verbs_txreq *tx; 2264 2265 list_del(l); 2266 tx = list_entry(l, struct qib_verbs_txreq, txreq.list); 2267 kfree(tx); 2268 } 2269 if (ppd->sdma_descq_cnt) 2270 dma_free_coherent(&dd->pcidev->dev, 2271 ppd->sdma_descq_cnt * 2272 sizeof(struct qib_pio_header), 2273 dev->pio_hdrs, dev->pio_hdrs_phys); 2274err_hdrs: 2275 vfree(dev->lk_table.table); 2276err_lk: 2277 kfree(dev->qp_table); 2278err_qpt: 2279 qib_dev_err(dd, "cannot register verbs: %d!\n", -ret); 2280bail: 2281 return ret; 2282} 2283 2284void qib_unregister_ib_device(struct qib_devdata *dd) 2285{ 2286 struct qib_ibdev *dev = &dd->verbs_dev; 2287 struct ib_device *ibdev = &dev->ibdev; 2288 u32 qps_inuse; 2289 unsigned lk_tab_size; 2290 2291 qib_verbs_unregister_sysfs(dd); 2292 2293 qib_free_agents(dev); 2294 2295 ib_unregister_device(ibdev); 2296 2297 if (!list_empty(&dev->piowait)) 2298 qib_dev_err(dd, "piowait list not empty!\n"); 2299 if (!list_empty(&dev->dmawait)) 2300 qib_dev_err(dd, "dmawait list not empty!\n"); 2301 if (!list_empty(&dev->txwait)) 2302 qib_dev_err(dd, "txwait list not empty!\n"); 2303 if (!list_empty(&dev->memwait)) 2304 qib_dev_err(dd, "memwait list not empty!\n"); 2305 if (dev->dma_mr) 2306 qib_dev_err(dd, "DMA MR not NULL!\n"); 2307 2308 qps_inuse = qib_free_all_qps(dd); 2309 if (qps_inuse) 2310 qib_dev_err(dd, "QP memory leak! %u still in use\n", 2311 qps_inuse); 2312 2313 del_timer_sync(&dev->mem_timer); 2314 qib_free_qpn_table(&dev->qpn_table); 2315 while (!list_empty(&dev->txreq_free)) { 2316 struct list_head *l = dev->txreq_free.next; 2317 struct qib_verbs_txreq *tx; 2318 2319 list_del(l); 2320 tx = list_entry(l, struct qib_verbs_txreq, txreq.list); 2321 kfree(tx); 2322 } 2323 if (dd->pport->sdma_descq_cnt) 2324 dma_free_coherent(&dd->pcidev->dev, 2325 dd->pport->sdma_descq_cnt * 2326 sizeof(struct qib_pio_header), 2327 dev->pio_hdrs, dev->pio_hdrs_phys); 2328 lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); 2329 vfree(dev->lk_table.table); 2330 kfree(dev->qp_table); 2331} 2332 2333/* 2334 * This must be called with s_lock held. 2335 */ 2336void qib_schedule_send(struct qib_qp *qp) 2337{ 2338 if (qib_send_ok(qp)) { 2339 struct qib_ibport *ibp = 2340 to_iport(qp->ibqp.device, qp->port_num); 2341 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 2342 2343 queue_work(ppd->qib_wq, &qp->s_work); 2344 } 2345} 2346