/linux-4.1.27/drivers/infiniband/hw/qib/
qib_qp.c:

223  static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
225      struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
227      unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
229      atomic_inc(&qp->refcount);
232      if (qp->ibqp.qp_num == 0)
233          rcu_assign_pointer(ibp->qp0, qp);
234      else if (qp->ibqp.qp_num == 1)
235          rcu_assign_pointer(ibp->qp1, qp);
237      qp->next = dev->qp_table[n];
238      rcu_assign_pointer(dev->qp_table[n], qp);

248  static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
250      struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
251      unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
258          lockdep_is_held(&dev->qpt_lock)) == qp) {
261          lockdep_is_held(&dev->qpt_lock)) == qp) {
272          if (q == qp) {
274              rcu_dereference_protected(qp->next,
284      atomic_dec(&qp->refcount);

qib_free_all_qps():
299      struct qib_qp *qp;
317          qp = rcu_dereference_protected(dev->qp_table[n],
321          for (; qp; qp = rcu_dereference_protected(qp->next,

qib_lookup_qpn():
341      struct qib_qp *qp = NULL;
346          qp = rcu_dereference(ibp->qp0);
348          qp = rcu_dereference(ibp->qp1);
349      if (qp)
350          atomic_inc(&qp->refcount);
355      for (qp = rcu_dereference(dev->qp_table[n]); qp;
356           qp = rcu_dereference(qp->next))
357          if (qp->ibqp.qp_num == qpn) {
358              atomic_inc(&qp->refcount);
363      return qp;

368   * @qp: the QP to reset
371  static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
373      qp->remote_qpn = 0;
374      qp->qkey = 0;
375      qp->qp_access_flags = 0;
376      atomic_set(&qp->s_dma_busy, 0);
377      qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
378      qp->s_hdrwords = 0;
379      qp->s_wqe = NULL;
380      qp->s_draining = 0;
381      qp->s_next_psn = 0;
382      qp->s_last_psn = 0;
383      qp->s_sending_psn = 0;
384      qp->s_sending_hpsn = 0;
385      qp->s_psn = 0;
386      qp->r_psn = 0;
387      qp->r_msn = 0;
389          qp->s_state = IB_OPCODE_RC_SEND_LAST;
390          qp->r_state = IB_OPCODE_RC_SEND_LAST;
392          qp->s_state = IB_OPCODE_UC_SEND_LAST;
393          qp->r_state = IB_OPCODE_UC_SEND_LAST;
395      qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
396      qp->r_nak_state = 0;
397      qp->r_aflags = 0;
398      qp->r_flags = 0;
399      qp->s_head = 0;
400      qp->s_tail = 0;
401      qp->s_cur = 0;
402      qp->s_acked = 0;
403      qp->s_last = 0;
404      qp->s_ssn = 1;
405      qp->s_lsn = 0;
406      qp->s_mig_state = IB_MIG_MIGRATED;
407      memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
408      qp->r_head_ack_queue = 0;
409      qp->s_tail_ack_queue = 0;
410      qp->s_num_rd_atomic = 0;
411      if (qp->r_rq.wq) {
412          qp->r_rq.wq->head = 0;
413          qp->r_rq.wq->tail = 0;
415      qp->r_sge.num_sge = 0;
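The insert/lookup pair above is a textbook RCU hash-chain: insert_qp() publishes under qpt_lock with rcu_assign_pointer(), and qib_lookup_qpn() walks the chain under rcu_read_lock(), pinning the QP's refcount before leaving the read-side critical section. A minimal sketch of the same pattern (kernel context assumed; QPT_SIZE, struct demo_qp, and demo_lookup() are illustrative names, not the driver's):

    #include <linux/rcupdate.h>
    #include <linux/atomic.h>

    #define QPT_SIZE 256    /* illustrative table size */

    struct demo_qp {
        u32 qp_num;
        atomic_t refcount;
        struct demo_qp __rcu *next;
    };

    static struct demo_qp __rcu *qp_table[QPT_SIZE];

    static struct demo_qp *demo_lookup(u32 qpn)
    {
        struct demo_qp *qp;

        rcu_read_lock();
        for (qp = rcu_dereference(qp_table[qpn % QPT_SIZE]); qp;
             qp = rcu_dereference(qp->next))
            if (qp->qp_num == qpn) {
                /* Pin the QP before dropping the RCU lock, exactly as
                 * qib_lookup_qpn() does with qp->refcount above. */
                atomic_inc(&qp->refcount);
                break;
            }
        rcu_read_unlock();
        return qp;
    }

The matching teardown is visible in qib_destroy_qp() below: remove_qp() unlinks the entry, then wait_event() blocks until the refcount drains to zero before the memory is freed.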
418  static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
422      if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
423          qib_put_ss(&qp->s_rdma_read_sge);
425      qib_put_ss(&qp->r_sge);
428      while (qp->s_last != qp->s_head) {
429          struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
437          if (qp->ibqp.qp_type == IB_QPT_UD ||
438              qp->ibqp.qp_type == IB_QPT_SMI ||
439              qp->ibqp.qp_type == IB_QPT_GSI)
441          if (++qp->s_last >= qp->s_size)
442              qp->s_last = 0;
444      if (qp->s_rdma_mr) {
445          qib_put_mr(qp->s_rdma_mr);
446          qp->s_rdma_mr = NULL;
450      if (qp->ibqp.qp_type != IB_QPT_RC)
453      for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
454          struct qib_ack_entry *e = &qp->s_ack_queue[n];

466   * @qp: the QP to put into the error state
474  int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
476      struct qib_ibdev *dev = to_idev(qp->ibqp.device);
480      if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
483      qp->state = IB_QPS_ERR;
485      if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
486          qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
487          del_timer(&qp->s_timer);
490      if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
491          qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
494      if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
495          qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
496          list_del_init(&qp->iowait);
500      if (!(qp->s_flags & QIB_S_BUSY)) {
501          qp->s_hdrwords = 0;
502          if (qp->s_rdma_mr) {
503              qib_put_mr(qp->s_rdma_mr);
504              qp->s_rdma_mr = NULL;
506          if (qp->s_tx) {
507              qib_put_txreq(qp->s_tx);
508              qp->s_tx = NULL;
513      if (qp->s_last != qp->s_head)
514          qib_schedule_send(qp);
516      clear_mr_refs(qp, 0);
519      wc.qp = &qp->ibqp;
522      if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
523          wc.wr_id = qp->r_wr_id;
525          qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
529      if (qp->r_rq.wq) {
534          spin_lock(&qp->r_rq.lock);
537          wq = qp->r_rq.wq;
539          if (head >= qp->r_rq.size)
542          if (tail >= qp->r_rq.size)
545              wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
546              if (++tail >= qp->r_rq.size)
548              qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
552          spin_unlock(&qp->r_rq.lock);
553      } else if (qp->ibqp.event_handler)

qib_modify_qp():
573      struct qib_qp *qp = to_iqp(ibqp);
581      spin_lock_irq(&qp->r_lock);
582      spin_lock(&qp->s_lock);
585          attr->cur_qp_state : qp->state;
595          if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
602          if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
617          if (qp->ibqp.qp_type == IB_QPT_SMI ||
618              qp->ibqp.qp_type == IB_QPT_GSI ||
638       * that to a small mtu.  We'll set qp->path_mtu
645      int mtu, pidx = qp->port_num - 1;
676          if (qp->s_mig_state == IB_MIG_ARMED)
681          if (qp->s_mig_state == IB_MIG_REARM)
685          if (qp->s_mig_state == IB_MIG_ARMED)
697          if (qp->state != IB_QPS_RESET) {
698              qp->state = IB_QPS_RESET;
700              if (!list_empty(&qp->iowait))
701                  list_del_init(&qp->iowait);
703              qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
704              spin_unlock(&qp->s_lock);
705              spin_unlock_irq(&qp->r_lock);
707              cancel_work_sync(&qp->s_work);
708              del_timer_sync(&qp->s_timer);
709              wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
710              if (qp->s_tx) {
711                  qib_put_txreq(qp->s_tx);
712                  qp->s_tx = NULL;
714              remove_qp(dev, qp);
715              wait_event(qp->wait, !atomic_read(&qp->refcount));
716              spin_lock_irq(&qp->r_lock);
717              spin_lock(&qp->s_lock);
718              clear_mr_refs(qp, 1);
719              qib_reset_qp(qp, ibqp->qp_type);
725          qp->r_flags &= ~QIB_R_COMM_EST;
726          qp->state = new_state;
730          qp->s_draining = qp->s_last != qp->s_cur;
731          qp->state = new_state;
735          if (qp->ibqp.qp_type == IB_QPT_RC)
737          qp->state = new_state;
741          lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
745          qp->state = new_state;
750          qp->s_pkey_index = attr->pkey_index;
753          qp->port_num = attr->port_num;
756          qp->remote_qpn = attr->dest_qp_num;
759          qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
760          qp->s_psn = qp->s_next_psn;
761          qp->s_sending_psn = qp->s_next_psn;
762          qp->s_last_psn = qp->s_next_psn - 1;
763          qp->s_sending_hpsn = qp->s_last_psn;
767          qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
770          qp->qp_access_flags = attr->qp_access_flags;
773          qp->remote_ah_attr = attr->ah_attr;
774          qp->s_srate = attr->ah_attr.static_rate;
778          qp->alt_ah_attr = attr->alt_ah_attr;
779          qp->s_alt_pkey_index = attr->alt_pkey_index;
783          qp->s_mig_state = attr->path_mig_state;
785              qp->remote_ah_attr = qp->alt_ah_attr;
786              qp->port_num = qp->alt_ah_attr.port_num;
787              qp->s_pkey_index = qp->s_alt_pkey_index;
792          qp->path_mtu = pmtu;
793          qp->pmtu = ib_mtu_enum_to_int(pmtu);
797          qp->s_retry_cnt = attr->retry_cnt;
798          qp->s_retry = attr->retry_cnt;
802          qp->s_rnr_retry_cnt = attr->rnr_retry;
803          qp->s_rnr_retry = attr->rnr_retry;
807          qp->r_min_rnr_timer = attr->min_rnr_timer;
810          qp->timeout = attr->timeout;
811          qp->timeout_jiffies =
812              usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
817          qp->qkey = attr->qkey;
820          qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
823          qp->s_max_rd_atomic = attr->max_rd_atomic;
825      spin_unlock(&qp->s_lock);
826      spin_unlock_irq(&qp->r_lock);
829          insert_qp(dev, qp);
832          ev.device = qp->ibqp.device;
833          ev.element.qp = &qp->ibqp;
835          qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
838          ev.device = qp->ibqp.device;
839          ev.element.qp = &qp->ibqp;
841          qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
847      spin_unlock(&qp->s_lock);
848      spin_unlock_irq(&qp->r_lock);

qib_query_qp():
858      struct qib_qp *qp = to_iqp(ibqp);
860      attr->qp_state = qp->state;
862      attr->path_mtu = qp->path_mtu;
863      attr->path_mig_state = qp->s_mig_state;
864      attr->qkey = qp->qkey;
865      attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
866      attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
867      attr->dest_qp_num = qp->remote_qpn;
868      attr->qp_access_flags = qp->qp_access_flags;
869      attr->cap.max_send_wr = qp->s_size - 1;
870      attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
871      attr->cap.max_send_sge = qp->s_max_sge;
872      attr->cap.max_recv_sge = qp->r_rq.max_sge;
874      attr->ah_attr = qp->remote_ah_attr;
875      attr->alt_ah_attr = qp->alt_ah_attr;
876      attr->pkey_index = qp->s_pkey_index;
877      attr->alt_pkey_index = qp->s_alt_pkey_index;
879      attr->sq_draining = qp->s_draining;
880      attr->max_rd_atomic = qp->s_max_rd_atomic;
881      attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
882      attr->min_rnr_timer = qp->r_min_rnr_timer;
883      attr->port_num = qp->port_num;
884      attr->timeout = qp->timeout;
885      attr->retry_cnt = qp->s_retry_cnt;
886      attr->rnr_retry = qp->s_rnr_retry_cnt;
887      attr->alt_port_num = qp->alt_ah_attr.port_num;
888      attr->alt_timeout = qp->alt_timeout;
890      init_attr->event_handler = qp->ibqp.event_handler;
891      init_attr->qp_context = qp->ibqp.qp_context;
892      init_attr->send_cq = qp->ibqp.send_cq;
893      init_attr->recv_cq = qp->ibqp.recv_cq;
894      init_attr->srq = qp->ibqp.srq;
896      if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
900      init_attr->qp_type = qp->ibqp.qp_type;
901      init_attr->port_num = qp->port_num;

907   * @qp: the queue pair to compute the AETH for
911  __be32 qib_compute_aeth(struct qib_qp *qp)
913      u32 aeth = qp->r_msn & QIB_MSN_MASK;
915      if (qp->ibqp.srq) {
924          struct qib_rwq *wq = qp->r_rq.wq;
930          if (head >= qp->r_rq.size)
933          if (tail >= qp->r_rq.size)
942              credits += qp->r_rq.size;
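qib_compute_aeth() packs the receiver's current message sequence number together with an encoded receive-queue credit count into the 32-bit AETH field carried in ACKs. A hedged sketch of the packing (QIB_MSN_MASK and QIB_AETH_CREDIT_SHIFT follow the driver's constants; the logarithmic credit encoding below is a simplified stand-in for the driver's exact table):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define DEMO_MSN_MASK          0xFFFFFFu  /* low 24 bits: MSN */
    #define DEMO_AETH_CREDIT_SHIFT 24         /* 5-bit credit code on top */

    static __be32 demo_compute_aeth(u32 msn, u32 head, u32 tail, u32 rq_size)
    {
        u32 credits = head - tail;      /* receive WQEs still posted */
        u32 code = 0;

        if ((int)credits < 0)
            credits += rq_size;         /* ring wraparound, as at line 942 */
        /* The IBTA AETH encodes credits roughly logarithmically. */
        while (credits > 1 && code < 31) {
            credits >>= 1;
            code++;
        }
        return cpu_to_be32((msn & DEMO_MSN_MASK) |
                           (code << DEMO_AETH_CREDIT_SHIFT));
    }

The sender-side decoding of this field is qib_get_credit(), listed further down.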
qib_create_qp():
979      struct qib_qp *qp;
1039         sz = sizeof(*qp);
1045             sg_list_sz = sizeof(*qp->r_sg_list) *
1048             sg_list_sz = sizeof(*qp->r_sg_list) *
1050         qp = kzalloc(sz + sg_list_sz, gfp);
1051         if (!qp) {
1055         RCU_INIT_POINTER(qp->next, NULL);
1056         qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
1057         if (!qp->s_hdr) {
1061         qp->timeout_jiffies =
1062             usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1067             qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1068             qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1069             sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1072                 qp->r_rq.wq = vmalloc_user(
1074                     qp->r_rq.size * sz);
1076                 qp->r_rq.wq = __vmalloc(
1078                     qp->r_rq.size * sz,
1081             if (!qp->r_rq.wq) {
1088          * ib_create_qp() will initialize qp->ibqp
1089          * except for qp->ibqp.qp_num.
1091         spin_lock_init(&qp->r_lock);
1092         spin_lock_init(&qp->s_lock);
1093         spin_lock_init(&qp->r_rq.lock);
1094         atomic_set(&qp->refcount, 0);
1095         init_waitqueue_head(&qp->wait);
1096         init_waitqueue_head(&qp->wait_dma);
1097         init_timer(&qp->s_timer);
1098         qp->s_timer.data = (unsigned long)qp;
1099         INIT_WORK(&qp->s_work, qib_do_send);
1100         INIT_LIST_HEAD(&qp->iowait);
1101         INIT_LIST_HEAD(&qp->rspwait);
1102         qp->state = IB_QPS_RESET;
1103         qp->s_wq = swq;
1104         qp->s_size = init_attr->cap.max_send_wr + 1;
1105         qp->s_max_sge = init_attr->cap.max_send_sge;
1107             qp->s_flags = QIB_S_SIGNAL_REQ_WR;
1114             vfree(qp->r_rq.wq);
1117         qp->ibqp.qp_num = err;
1118         qp->port_num = init_attr->port_num;
1119         qib_reset_qp(qp, init_attr->qp_type);
1135         if (!qp->r_rq.wq) {
1145             u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
1147             qp->ip = qib_create_mmap_info(dev, s,
1149                 qp->r_rq.wq);
1150             if (!qp->ip) {
1155             err = ib_copy_to_udata(udata, &(qp->ip->offset),
1156                 sizeof(qp->ip->offset));
1174     if (qp->ip) {
1176         list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
1180     ret = &qp->ibqp;
1184     if (qp->ip)
1185         kref_put(&qp->ip->ref, qib_release_mmap_info);
1187         vfree(qp->r_rq.wq);
1188     free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
1190     kfree(qp->s_hdr);
1191     kfree(qp);

qib_destroy_qp():
1209     struct qib_qp *qp = to_iqp(ibqp);
1213     spin_lock_irq(&qp->s_lock);
1214     if (qp->state != IB_QPS_RESET) {
1215         qp->state = IB_QPS_RESET;
1217         if (!list_empty(&qp->iowait))
1218             list_del_init(&qp->iowait);
1220         qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
1221         spin_unlock_irq(&qp->s_lock);
1222         cancel_work_sync(&qp->s_work);
1223         del_timer_sync(&qp->s_timer);
1224         wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
1225         if (qp->s_tx) {
1226             qib_put_txreq(qp->s_tx);
1227             qp->s_tx = NULL;
1229         remove_qp(dev, qp);
1230         wait_event(qp->wait, !atomic_read(&qp->refcount));
1231         clear_mr_refs(qp, 1);
1233         spin_unlock_irq(&qp->s_lock);
1236     free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
1241     if (qp->ip)
1242         kref_put(&qp->ip->ref, qib_release_mmap_info);
1244         vfree(qp->r_rq.wq);
1245     vfree(qp->s_wq);
1246     kfree(qp->s_hdr);
1247     kfree(qp);
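The conversion at lines 811-812 and 1061-1062 turns the IB local-ACK-timeout field, which is an exponent, into jiffies: the wire meaning is 4.096 µs × 2^timeout. A small worked sketch, assuming only the formula shown above (demo name is mine):

    #include <linux/jiffies.h>

    /* IB local ACK timeout: 4.096 usec * 2^timeout.
     * 4096UL * (1UL << timeout) is the interval in nanoseconds, so
     * dividing by 1000 yields microseconds, matching the driver.
     * For timeout = 14 this is 4096 * 16384 ns, about 67 ms. */
    static unsigned long demo_timeout_jiffies(u8 timeout /* 0..31 */)
    {
        return usecs_to_jiffies((4096UL * (1UL << timeout)) / 1000);
    }

Note also the ring sizing at lines 1067 and 1104: each queue gets max_wr + 1 slots because one slot is kept open to distinguish a full ring from an empty one, which is why qib_query_qp() reports s_size - 1 and r_rq.size - 1 back to the consumer.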
1278  * @qp: the qp whose send work queue to flush
1283 void qib_get_credit(struct qib_qp *qp, u32 aeth)
1293         if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
1294             qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
1295             if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
1296                 qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
1297                 qib_schedule_send(qp);
1300     } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
1303         if (qib_cmp24(credit, qp->s_lsn) > 0) {
1304             qp->s_lsn = credit;
1305             if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
1306                 qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
1307                 qib_schedule_send(qp);

struct qib_qp_iter:
1317     struct qib_qp *qp;

qib_qp_iter_next():
1343     struct qib_qp *pqp = iter->qp;
1344     struct qib_qp *qp;
1348         qp = rcu_dereference(pqp->next);
1350         qp = rcu_dereference(dev->qp_table[n]);
1351     pqp = qp;
1352     if (qp) {
1353         iter->qp = qp;

qib_qp_iter_print():
1368     struct qib_qp *qp = iter->qp;
1370     wqe = get_swqe_ptr(qp, qp->s_last);
1374         qp->ibqp.qp_num,
1375         qp_type_str[qp->ibqp.qp_type],
1376         qp->state,
1378         qp->s_hdrwords,
1379         qp->s_flags,
1380         atomic_read(&qp->s_dma_busy),
1381         !list_empty(&qp->iowait),
1382         qp->timeout,
1384         qp->s_lsn,
1385         qp->s_last_psn,
1386         qp->s_psn, qp->s_next_psn,
1387         qp->s_sending_psn, qp->s_sending_hpsn,
1388         qp->s_last, qp->s_acked, qp->s_cur,
1389         qp->s_tail, qp->s_head, qp->s_size,
1390         qp->remote_qpn,
1391         qp->remote_ah_attr.dlid);
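qib_get_credit() is the sender side of IB end-to-end flow control: the AETH credit field advances a "limit sequence number" (s_lsn), and a sender blocked on QIB_S_WAIT_SSN_CREDIT is rescheduled once the limit moves. A compact sketch of the gating test a sender then performs, distilled from the checks visible in qib_make_rc_req() in the qib_rc.c section below (demo name is mine; the 24-bit compare idiom is sketched in that section too):

    /* May a WQE with send sequence number 'ssn' be transmitted?
     * s_lsn is the highest SSN the peer has granted credit for.
     * SSNs are 24-bit and wrap, so compare via a sign-extended
     * 24-bit difference rather than plain '<='. */
    static bool demo_may_send(u32 s_flags, u32 ssn, u32 s_lsn)
    {
        int diff24 = (((int)ssn) - ((int)(s_lsn + 1))) << 8;

        if (s_flags & QIB_S_UNLIMITED_CREDIT)  /* peer waived flow control */
            return true;
        return diff24 <= 0;                    /* within the granted window */
    }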
qib_rc.c:

57   static void start_timer(struct qib_qp *qp)
59       qp->s_flags |= QIB_S_TIMER;
60       qp->s_timer.function = rc_timeout;
61       /* 4.096 usec. * (1 << qp->timeout) */
62       qp->s_timer.expires = jiffies + qp->timeout_jiffies;
63       add_timer(&qp->s_timer);

69    * @qp: a pointer to the QP
77   static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
87       if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
93       switch (qp->s_ack_state) {
96           e = &qp->s_ack_queue[qp->s_tail_ack_queue];
108              if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
109                  qp->s_tail_ack_queue = 0;
114          if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
115              if (qp->s_flags & QIB_S_ACK_PENDING)
120          e = &qp->s_ack_queue[qp->s_tail_ack_queue];
130              qp->s_tail_ack_queue = qp->r_head_ack_queue;
134              qp->s_rdma_mr = e->rdma_sge.mr;
135              if (qp->s_rdma_mr)
136                  qib_get_mr(qp->s_rdma_mr);
137              qp->s_ack_rdma_sge.sge = e->rdma_sge;
138              qp->s_ack_rdma_sge.num_sge = 1;
139              qp->s_cur_sge = &qp->s_ack_rdma_sge;
142                  qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
144                  qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
147                  ohdr->u.aeth = qib_compute_aeth(qp);
149              qp->s_ack_rdma_psn = e->psn;
150              bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
153              qp->s_cur_sge = NULL;
155              qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
156              ohdr->u.at.aeth = qib_compute_aeth(qp);
165          bth0 = qp->s_ack_state << 24;
169          qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
172          qp->s_cur_sge = &qp->s_ack_rdma_sge;
173          qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
174          if (qp->s_rdma_mr)
175              qib_get_mr(qp->s_rdma_mr);
176          len = qp->s_ack_rdma_sge.sge.sge_length;
180              ohdr->u.aeth = qib_compute_aeth(qp);
182              qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
183              e = &qp->s_ack_queue[qp->s_tail_ack_queue];
186          bth0 = qp->s_ack_state << 24;
187          bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
198          qp->s_ack_state = OP(SEND_ONLY);
199          qp->s_flags &= ~QIB_S_ACK_PENDING;
200          qp->s_cur_sge = NULL;
201          if (qp->s_nak_state)
203              cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
204                  (qp->s_nak_state <<
207              ohdr->u.aeth = qib_compute_aeth(qp);
211          bth2 = qp->s_ack_psn & QIB_PSN_MASK;
213      qp->s_rdma_ack_cnt++;
214      qp->s_hdrwords = hwords;
215      qp->s_cur_size = len;
216      qib_make_ruc_header(qp, ohdr, bth0, bth2);
220      qp->s_ack_state = OP(ACKNOWLEDGE);
221      qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);

227   * @qp: a pointer to the QP
231  int qib_make_rc_req(struct qib_qp *qp)
233      struct qib_ibdev *dev = to_idev(qp->ibqp.device);
241      u32 pmtu = qp->pmtu;
247      ohdr = &qp->s_hdr->u.oth;
248      if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
249          ohdr = &qp->s_hdr->u.l.oth;
255      spin_lock_irqsave(&qp->s_lock, flags);
258      if ((qp->s_flags & QIB_S_RESP_PENDING) &&
259          qib_make_rc_ack(dev, qp, ohdr, pmtu))
262      if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
263          if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
266          if (qp->s_last == qp->s_head)
269          if (atomic_read(&qp->s_dma_busy)) {
270              qp->s_flags |= QIB_S_WAIT_DMA;
273          wqe = get_swqe_ptr(qp, qp->s_last);
274          qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
280      if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
283      if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
284          if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
285              qp->s_flags |= QIB_S_WAIT_PSN;
288          qp->s_sending_psn = qp->s_psn;
289          qp->s_sending_hpsn = qp->s_psn - 1;
297      wqe = get_swqe_ptr(qp, qp->s_cur);
298      switch (qp->s_state) {
300          if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
310          if (qp->s_cur == qp->s_tail) {
312              if (qp->s_tail == qp->s_head)
319                  qp->s_num_rd_atomic) {
320                  qp->s_flags |= QIB_S_WAIT_FENCE;
323              wqe->psn = qp->s_next_psn;
332          ss = &qp->s_sge;
333          bth2 = qp->s_psn & QIB_PSN_MASK;
338              if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
339                  qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
340                  qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
346                  qp->s_state = OP(SEND_FIRST);
351                  qp->s_state = OP(SEND_ONLY);
353                  qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
361              if (++qp->s_cur == qp->s_size)
362                  qp->s_cur = 0;
366              if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
367                  qp->s_lsn++;
371              if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
372                  qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
373                  qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
385                  qp->s_state = OP(RDMA_WRITE_FIRST);
390                  qp->s_state = OP(RDMA_WRITE_ONLY);
392                  qp->s_state =
401              if (++qp->s_cur == qp->s_size)
402                  qp->s_cur = 0;
411              if (qp->s_num_rd_atomic >=
412                  qp->s_max_rd_atomic) {
413                  qp->s_flags |= QIB_S_WAIT_RDMAR;
416              qp->s_num_rd_atomic++;
417              if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
418                  qp->s_lsn++;
424              qp->s_next_psn += (len - 1) / pmtu;
425              wqe->lpsn = qp->s_next_psn++;
432              qp->s_state = OP(RDMA_READ_REQUEST);
437              if (++qp->s_cur == qp->s_size)
438                  qp->s_cur = 0;
448              if (qp->s_num_rd_atomic >=
449                  qp->s_max_rd_atomic) {
450                  qp->s_flags |= QIB_S_WAIT_RDMAR;
453              qp->s_num_rd_atomic++;
454              if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
455                  qp->s_lsn++;
459                  qp->s_state = OP(COMPARE_SWAP);
465                  qp->s_state = OP(FETCH_ADD);
480              if (++qp->s_cur == qp->s_size)
481                  qp->s_cur = 0;
487          qp->s_sge.sge = wqe->sg_list[0];
488          qp->s_sge.sg_list = wqe->sg_list + 1;
489          qp->s_sge.num_sge = wqe->wr.num_sge;
490          qp->s_sge.total_len = wqe->length;
491          qp->s_len = wqe->length;
493              qp->s_tail++;
494              if (qp->s_tail >= qp->s_size)
495                  qp->s_tail = 0;
498              qp->s_psn = wqe->lpsn + 1;
500              qp->s_psn++;
501              if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
502                  qp->s_next_psn = qp->s_psn;
508       * qp->s_state is normally set to the opcode of the
516      qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
519          qp->s_state = OP(SEND_MIDDLE);
522      bth2 = qp->s_psn++ & QIB_PSN_MASK;
523      if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
524          qp->s_next_psn = qp->s_psn;
525      ss = &qp->s_sge;
526      len = qp->s_len;
532          qp->s_state = OP(SEND_LAST);
534          qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
542      qp->s_cur++;
543      if (qp->s_cur >= qp->s_size)
544          qp->s_cur = 0;
549       * qp->s_state is normally set to the opcode of the
557      qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
560          qp->s_state = OP(RDMA_WRITE_MIDDLE);
563      bth2 = qp->s_psn++ & QIB_PSN_MASK;
564      if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
565          qp->s_next_psn = qp->s_psn;
566      ss = &qp->s_sge;
567      len = qp->s_len;
573          qp->s_state = OP(RDMA_WRITE_LAST);
575          qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
583      qp->s_cur++;
584      if (qp->s_cur >= qp->s_size)
585          qp->s_cur = 0;
590       * qp->s_state is normally set to the opcode of the
598      len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
604      qp->s_state = OP(RDMA_READ_REQUEST);
606      bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
607      qp->s_psn = wqe->lpsn + 1;
610      qp->s_cur++;
611      if (qp->s_cur == qp->s_size)
612          qp->s_cur = 0;
615      qp->s_sending_hpsn = bth2;
619      if (qp->s_flags & QIB_S_SEND_ONE) {
620          qp->s_flags &= ~QIB_S_SEND_ONE;
621          qp->s_flags |= QIB_S_WAIT_ACK;
624      qp->s_len -= len;
625      qp->s_hdrwords = hwords;
626      qp->s_cur_sge = ss;
627      qp->s_cur_size = len;
628      qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
634      qp->s_flags &= ~QIB_S_BUSY;
636      spin_unlock_irqrestore(&qp->s_lock, flags);

642   * @qp: a pointer to the QP
648  void qib_send_rc_ack(struct qib_qp *qp)
650      struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
651      struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
664      spin_lock_irqsave(&qp->s_lock, flags);
666      if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
670      if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
678      if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
680          &qp->remote_ah_attr.grh, hwords, 0);
685      bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
686      if (qp->s_mig_state == IB_MIG_MIGRATED)
688      if (qp->r_nak_state)
689          ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
690              (qp->r_nak_state <<
693          ohdr->u.aeth = qib_compute_aeth(qp);
694      lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
695          qp->remote_ah_attr.sl << 4;
697      hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
699      hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
701      ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
702      ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
704      spin_unlock_irqrestore(&qp->s_lock, flags);
711          qp->s_srate, lrh0 >> 12);
724      spin_lock_irqsave(&qp->s_lock, flags);
759      if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
761          qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
762          qp->s_nak_state = qp->r_nak_state;
763          qp->s_ack_psn = qp->r_ack_psn;
766          qib_schedule_send(qp);
769      spin_unlock_irqrestore(&qp->s_lock, flags);

776   * @qp: the QP
783  static void reset_psn(struct qib_qp *qp, u32 psn)
785      u32 n = qp->s_acked;
786      struct qib_swqe *wqe = get_swqe_ptr(qp, n);
789      qp->s_cur = n;
796          qp->s_state = OP(SEND_LAST);
805          if (++n == qp->s_size)
807          if (n == qp->s_tail)
809          wqe = get_swqe_ptr(qp, n);
813      qp->s_cur = n;
819          qp->s_state = OP(SEND_LAST);
833          qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
838          qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
842          qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
850          qp->s_state = OP(SEND_LAST);
853      qp->s_psn = psn;
859      if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
860          (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
861          qp->s_flags |= QIB_S_WAIT_PSN;

868  static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
870      struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
873      if (qp->s_retry == 0) {
874          if (qp->s_mig_state == IB_MIG_ARMED) {
875              qib_migrate_qp(qp);
876              qp->s_retry = qp->s_retry_cnt;
877          } else if (qp->s_last == qp->s_acked) {
878              qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
879              qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
884          qp->s_retry--;
886      ibp = to_iport(qp->ibqp.device, qp->port_num);
890          ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
892      qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
896          qp->s_flags |= QIB_S_SEND_ONE;
897      reset_psn(qp, psn);

rc_timeout():
905      struct qib_qp *qp = (struct qib_qp *)arg;
909      spin_lock_irqsave(&qp->r_lock, flags);
910      spin_lock(&qp->s_lock);
911      if (qp->s_flags & QIB_S_TIMER) {
912          ibp = to_iport(qp->ibqp.device, qp->port_num);
914          qp->s_flags &= ~QIB_S_TIMER;
915          del_timer(&qp->s_timer);
916          qib_restart_rc(qp, qp->s_last_psn + 1, 1);
917          qib_schedule_send(qp);
919      spin_unlock(&qp->s_lock);
920      spin_unlock_irqrestore(&qp->r_lock, flags);

qib_rc_rnr_retry():
928      struct qib_qp *qp = (struct qib_qp *)arg;
931      spin_lock_irqsave(&qp->s_lock, flags);
932      if (qp->s_flags & QIB_S_WAIT_RNR) {
933          qp->s_flags &= ~QIB_S_WAIT_RNR;
934          del_timer(&qp->s_timer);
935          qib_schedule_send(qp);
937      spin_unlock_irqrestore(&qp->s_lock, flags);

941   * Set qp->s_sending_psn to the next PSN after the given one.
944  static void reset_sending_psn(struct qib_qp *qp, u32 psn)
947      u32 n = qp->s_last;
951          wqe = get_swqe_ptr(qp, n);
954              qp->s_sending_psn = wqe->lpsn + 1;
956              qp->s_sending_psn = psn + 1;
959          if (++n == qp->s_size)
961          if (n == qp->s_tail)

969  void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
978      if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
990          WARN_ON(!qp->s_rdma_ack_cnt);
991          qp->s_rdma_ack_cnt--;
996      reset_sending_psn(qp, psn);
1002     if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
1003         !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
1004         (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
1005         start_timer(qp);
1007     while (qp->s_last != qp->s_acked) {
1008         wqe = get_swqe_ptr(qp, qp->s_last);
1009         if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1010             qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1018         if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1025             wc.qp = &qp->ibqp;
1026             qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
1028         if (++qp->s_last >= qp->s_size)
1029             qp->s_last = 0;
1035     if (qp->s_flags & QIB_S_WAIT_PSN &&
1036         qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1037         qp->s_flags &= ~QIB_S_WAIT_PSN;
1038         qp->s_sending_psn = qp->s_psn;
1039         qp->s_sending_hpsn = qp->s_psn - 1;
1040         qib_schedule_send(qp);

1044 static inline void update_last_psn(struct qib_qp *qp, u32 psn)
1046     qp->s_last_psn = psn;
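The qib_cmp24() calls above order 24-bit packet sequence numbers that wrap. The driver's helper lives in a header not shown in this listing; a sketch of the standard serial-number idiom it uses:

    /* Compare two 24-bit sequence numbers that may have wrapped,
     * in the style of qib_cmp24(): shift the 24-bit difference up so
     * bit 23 becomes the int's sign bit. Callers only test the sign
     * (<0, ==0, >0), and PSNs in flight differ by far less than 2^23,
     * so the circular comparison is well defined. */
    static int demo_cmp24(u32 a, u32 b)
    {
        return (((int)a) - ((int)b)) << 8;
    }

For example, demo_cmp24(0x000002, 0xFFFFFE) is positive: 2 is "after" 0xFFFFFE once the 24-bit counter wraps.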
1054 static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
1066     if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
1067         qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1074         if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1081             wc.qp = &qp->ibqp;
1082             qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
1084         if (++qp->s_last >= qp->s_size)
1085             qp->s_last = 0;
1089     qp->s_retry = qp->s_retry_cnt;
1090     update_last_psn(qp, wqe->lpsn);
1097     if (qp->s_acked == qp->s_cur) {
1098         if (++qp->s_cur >= qp->s_size)
1099             qp->s_cur = 0;
1100         qp->s_acked = qp->s_cur;
1101         wqe = get_swqe_ptr(qp, qp->s_cur);
1102         if (qp->s_acked != qp->s_tail) {
1103             qp->s_state = OP(SEND_LAST);
1104             qp->s_psn = wqe->psn;
1107         if (++qp->s_acked >= qp->s_size)
1108             qp->s_acked = 0;
1109         if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1110             qp->s_draining = 0;
1111         wqe = get_swqe_ptr(qp, qp->s_acked);

1118  * @qp: the QP the ACK came in on
1127 static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
1138     if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
1139         qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
1140         del_timer(&qp->s_timer);
1152     wqe = get_swqe_ptr(qp, qp->s_acked);
1153     ibp = to_iport(qp->ibqp.device, qp->port_num);
1187         if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
1188             qp->r_flags |= QIB_R_RDMAR_SEQ;
1189             qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1190             if (list_empty(&qp->rspwait)) {
1191                 qp->r_flags |= QIB_R_RSP_SEND;
1192                 atomic_inc(&qp->refcount);
1193                 list_add_tail(&qp->rspwait,
1208         if (qp->s_num_rd_atomic &&
1212             qp->s_num_rd_atomic--;
1214             if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
1215                 !qp->s_num_rd_atomic) {
1216                 qp->s_flags &= ~(QIB_S_WAIT_FENCE |
1218                 qib_schedule_send(qp);
1219             } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
1220                 qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
1222                 qib_schedule_send(qp);
1225             wqe = do_rc_completion(qp, wqe, ibp);
1226             if (qp->s_acked == qp->s_tail)
1233         if (qp->s_acked != qp->s_tail) {
1238             start_timer(qp);
1243             if (qib_cmp24(qp->s_psn, psn) <= 0)
1244                 reset_psn(qp, psn + 1);
1245         } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
1246             qp->s_state = OP(SEND_LAST);
1247             qp->s_psn = psn + 1;
1249         if (qp->s_flags & QIB_S_WAIT_ACK) {
1250             qp->s_flags &= ~QIB_S_WAIT_ACK;
1251             qib_schedule_send(qp);
1253         qib_get_credit(qp, aeth);
1254         qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1255         qp->s_retry = qp->s_retry_cnt;
1256         update_last_psn(qp, psn);
1262         if (qp->s_acked == qp->s_tail)
1264         if (qp->s_flags & QIB_S_WAIT_RNR)
1266         if (qp->s_rnr_retry == 0) {
1270         if (qp->s_rnr_retry_cnt < 7)
1271             qp->s_rnr_retry--;
1274         update_last_psn(qp, psn - 1);
1276         ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
1278         reset_psn(qp, psn);
1280         qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
1281         qp->s_flags |= QIB_S_WAIT_RNR;
1282         qp->s_timer.function = qib_rc_rnr_retry;
1283         qp->s_timer.expires = jiffies + usecs_to_jiffies(
1286         add_timer(&qp->s_timer);
1290         if (qp->s_acked == qp->s_tail)
1293         update_last_psn(qp, psn - 1);
1304             qib_restart_rc(qp, psn, 0);
1305             qib_schedule_send(qp);
1322         if (qp->s_last == qp->s_acked) {
1323             qib_send_complete(qp, wqe, status);
1324             qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1332     qp->s_retry = qp->s_retry_cnt;
1333     qp->s_rnr_retry = qp->s_rnr_retry_cnt;

1350 static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
1356     if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
1357         qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
1358         del_timer(&qp->s_timer);
1361     wqe = get_swqe_ptr(qp, qp->s_acked);
1368         wqe = do_rc_completion(qp, wqe, ibp);
1372     qp->r_flags |= QIB_R_RDMAR_SEQ;
1373     qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1374     if (list_empty(&qp->rspwait)) {
1375         qp->r_flags |= QIB_R_RSP_SEND;
1376         atomic_inc(&qp->refcount);
1377         list_add_tail(&qp->rspwait, &rcd->qp_wait_list);

1387  * @qp: the QP for this packet
1400     struct qib_qp *qp,
1419     if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1420         (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1426         if (!(qp->s_flags & QIB_S_BUSY)) {
1437     spin_lock_irqsave(&qp->s_lock, flags);
1438     if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
1442     if (qib_cmp24(psn, qp->s_next_psn) >= 0)
1446     diff = qib_cmp24(psn, qp->s_last_psn);
1452             qib_get_credit(qp, aeth);
1461     if (qp->r_flags & QIB_R_RDMAR_SEQ) {
1462         if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
1464         qp->r_flags &= ~QIB_R_RDMAR_SEQ;
1467     if (unlikely(qp->s_acked == qp->s_tail))
1469     wqe = get_swqe_ptr(qp, qp->s_acked);
1484         if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1488         wqe = get_swqe_ptr(qp, qp->s_acked);
1496         qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1502         if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1509         if (unlikely(pmtu >= qp->s_rdma_read_len))
1514          * 4.096 usec. * (1 << qp->timeout)
1516         qp->s_flags |= QIB_S_TIMER;
1517         mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
1518         if (qp->s_flags & QIB_S_WAIT_ACK) {
1519             qp->s_flags &= ~QIB_S_WAIT_ACK;
1520             qib_schedule_send(qp);
1524         qp->s_retry = qp->s_retry_cnt;
1530         qp->s_rdma_read_len -= pmtu;
1531         update_last_psn(qp, psn);
1532         spin_unlock_irqrestore(&qp->s_lock, flags);
1533         qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
1538         if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1554         wqe = get_swqe_ptr(qp, qp->s_acked);
1555         qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1561         if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1576         if (unlikely(tlen != qp->s_rdma_read_len))
1579         qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
1580         WARN_ON(qp->s_rdma_read_sge.num_sge);
1581         (void) do_rc_ack(qp, aeth, psn,
1591         rdma_seq_err(qp, ibp, psn, rcd);
1597     if (qp->s_last == qp->s_acked) {
1598         qib_send_complete(qp, wqe, status);
1599         qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1602     spin_unlock_irqrestore(&qp->s_lock, flags);

1611  * @qp: the QP for this packet
1624     struct qib_qp *qp,
1630     struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1642         if (!qp->r_nak_state) {
1644             qp->r_nak_state = IB_NAK_PSN_ERROR;
1646             qp->r_ack_psn = qp->r_psn;
1652             if (list_empty(&qp->rspwait)) {
1653                 qp->r_flags |= QIB_R_RSP_NAK;
1654                 atomic_inc(&qp->refcount);
1655                 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1681     spin_lock_irqsave(&qp->s_lock, flags);
1683     for (i = qp->r_head_ack_queue; ; i = prev) {
1684         if (i == qp->s_tail_ack_queue)
1690         if (prev == qp->r_head_ack_queue) {
1694         e = &qp->s_ack_queue[prev];
1700         if (prev == qp->s_tail_ack_queue &&
1728             qp->pmtu;
1741         ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
1753         qp->s_tail_ack_queue = prev;
1766         qp->s_tail_ack_queue = prev;
1781         if (i == qp->r_head_ack_queue) {
1782             spin_unlock_irqrestore(&qp->s_lock, flags);
1783             qp->r_nak_state = 0;
1784             qp->r_ack_psn = qp->r_psn - 1;
1792         if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
1793             spin_unlock_irqrestore(&qp->s_lock, flags);
1794             qp->r_nak_state = 0;
1795             qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1802         qp->s_tail_ack_queue = i;
1805     qp->s_ack_state = OP(ACKNOWLEDGE);
1806     qp->s_flags |= QIB_S_RESP_PENDING;
1807     qp->r_nak_state = 0;
1808     qib_schedule_send(qp);
1811     spin_unlock_irqrestore(&qp->s_lock, flags);

1819 void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
1824     spin_lock_irqsave(&qp->s_lock, flags);
1825     lastwqe = qib_error_qp(qp, err);
1826     spin_unlock_irqrestore(&qp->s_lock, flags);
1831         ev.device = qp->ibqp.device;
1832         ev.element.qp = &qp->ibqp;
1834         qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
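The responder's ack queue manipulated above (s_tail_ack_queue, r_head_ack_queue) is a small ring holding at most QIB_MAX_RDMA_ATOMIC outstanding RDMA READ/atomic requests; indices wrap across QIB_MAX_RDMA_ATOMIC + 1 slots. A sketch of the wrap arithmetic, assuming only the rule visible at lines 108-109 (demo name is mine):

    /* Advance an ack-queue index, wrapping as qib_update_ack_queue()
     * and qib_make_rc_ack() do; the ring has one more slot than the
     * number of requests it may hold outstanding. */
    static unsigned demo_next_ack_slot(unsigned n)
    {
        unsigned next = n + 1;

        if (next > QIB_MAX_RDMA_ATOMIC)  /* same test as lines 108-109 */
            next = 0;
        return next;
    }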
1838 static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
1845     qp->s_tail_ack_queue = next;
1846     qp->s_ack_state = OP(ACKNOWLEDGE);

1856  * @qp: the QP for this packet
1863     int has_grh, void *data, u32 tlen, struct qib_qp *qp)
1872     u32 pmtu = qp->pmtu;
1888     if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
1902         qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1908     diff = qib_cmp24(psn, qp->r_psn);
1910         if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
1916     switch (qp->r_state) {
1949     if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
1950         qp->r_flags |= QIB_R_COMM_EST;
1951         if (qp->ibqp.event_handler) {
1954             ev.device = qp->ibqp.device;
1955             ev.element.qp = &qp->ibqp;
1957             qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1964         ret = qib_get_rwqe(qp, 0);
1969         qp->r_rcv_len = 0;
1977         qp->r_rcv_len += pmtu;
1978         if (unlikely(qp->r_rcv_len > qp->r_len))
1980         qib_copy_sge(&qp->r_sge, data, pmtu, 1);
1985         ret = qib_get_rwqe(qp, 1);
1994             ret = qib_get_rwqe(qp, 0);
1999             qp->r_rcv_len = 0;
2023         wc.byte_len = tlen + qp->r_rcv_len;
2024         if (unlikely(wc.byte_len > qp->r_len))
2026         qib_copy_sge(&qp->r_sge, data, tlen, 1);
2027         qib_put_ss(&qp->r_sge);
2028         qp->r_msn++;
2029         if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
2031         wc.wr_id = qp->r_wr_id;
2038         wc.qp = &qp->ibqp;
2039         wc.src_qp = qp->remote_qpn;
2040         wc.slid = qp->remote_ah_attr.dlid;
2041         wc.sl = qp->remote_ah_attr.sl;
2048         qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
2056         if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2061         qp->r_len = be32_to_cpu(reth->length);
2062         qp->r_rcv_len = 0;
2063         qp->r_sge.sg_list = NULL;
2064         if (qp->r_len != 0) {
2070             ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
2074             qp->r_sge.num_sge = 1;
2076             qp->r_sge.num_sge = 0;
2077             qp->r_sge.sge.mr = NULL;
2078             qp->r_sge.sge.vaddr = NULL;
2079             qp->r_sge.sge.length = 0;
2080             qp->r_sge.sge.sge_length = 0;
2086             ret = qib_get_rwqe(qp, 1);
2101         if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2103         next = qp->r_head_ack_queue + 1;
2107         spin_lock_irqsave(&qp->s_lock, flags);
2108         if (unlikely(next == qp->s_tail_ack_queue)) {
2109             if (!qp->s_ack_queue[next].sent)
2111             qib_update_ack_queue(qp, next);
2113         e = &qp->s_ack_queue[qp->r_head_ack_queue];
2126             ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
2135             qp->r_psn += (len - 1) / pmtu;
2145         e->lpsn = qp->r_psn;
2151         qp->r_msn++;
2152         qp->r_psn++;
2153         qp->r_state = opcode;
2154         qp->r_nak_state = 0;
2155         qp->r_head_ack_queue = next;
2158         qp->s_flags |= QIB_S_RESP_PENDING;
2159         qib_schedule_send(qp);
2174         if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2176         next = qp->r_head_ack_queue + 1;
2179         spin_lock_irqsave(&qp->s_lock, flags);
2180         if (unlikely(next == qp->s_tail_ack_queue)) {
2181             if (!qp->s_ack_queue[next].sent)
2183             qib_update_ack_queue(qp, next);
2185         e = &qp->s_ack_queue[qp->r_head_ack_queue];
2197         if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2202         maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
2206             (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
2209         qib_put_mr(qp->r_sge.sge.mr);
2210         qp->r_sge.num_sge = 0;
2215         qp->r_msn++;
2216         qp->r_psn++;
2217         qp->r_state = opcode;
2218         qp->r_nak_state = 0;
2219         qp->r_head_ack_queue = next;
2222         qp->s_flags |= QIB_S_RESP_PENDING;
2223         qib_schedule_send(qp);
2232     qp->r_psn++;
2233     qp->r_state = opcode;
2234     qp->r_ack_psn = psn;
2235     qp->r_nak_state = 0;
2242     qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2243     qp->r_ack_psn = qp->r_psn;
2245     if (list_empty(&qp->rspwait)) {
2246         qp->r_flags |= QIB_R_RSP_NAK;
2247         atomic_inc(&qp->refcount);
2248         list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2253     qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2254     qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2255     qp->r_ack_psn = qp->r_psn;
2257     if (list_empty(&qp->rspwait)) {
2258         qp->r_flags |= QIB_R_RSP_NAK;
2259         atomic_inc(&qp->refcount);
2260         list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2265     spin_unlock_irqrestore(&qp->s_lock, flags);
2267     qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2268     qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2269     qp->r_ack_psn = qp->r_psn;
2271     if (list_empty(&qp->rspwait)) {
2272         qp->r_flags |= QIB_R_RSP_NAK;
2273         atomic_inc(&qp->refcount);
2274         list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2279     spin_unlock_irqrestore(&qp->s_lock, flags);
2281     qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
2282     qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2283     qp->r_ack_psn = qp->r_psn;
2285     qib_send_rc_ack(qp);
2289     spin_unlock_irqrestore(&qp->s_lock, flags);

Full signatures of the static functions above:
1397 qib_rc_rcv_resp(struct qib_ibport *ibp, struct qib_other_headers *ohdr, void *data, u32 tlen, struct qib_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct qib_ctxtdata *rcd)
1622 qib_rc_rcv_error(struct qib_other_headers *ohdr, void *data, struct qib_qp *qp, u32 opcode, u32 psn, int diff, struct qib_ctxtdata *rcd)
1862 qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp)
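The responder executes IB atomics directly on the registered memory (lines 2202-2206): FETCH_ADD via atomic64_add_return() and COMPARE_SWAP via cmpxchg(), returning the prior value to the requester. A hedged sketch of that dispatch (demo_* names are mine; the address must already be rkey-validated and 8-byte aligned, as qib_rkey_ok() ensures above):

    #include <linux/atomic.h>

    /* Execute an IB atomic on target memory and return the original
     * value, mirroring the atomic64_add_return()/cmpxchg() pair at
     * lines 2202-2206. */
    static u64 demo_ib_atomic(u64 *vaddr, bool is_fetch_add,
                              u64 swap_or_add, u64 compare)
    {
        atomic64_t *maddr = (atomic64_t *)vaddr;

        if (is_fetch_add)
            /* add_return yields the new value; subtract the addend
             * to report the pre-operation value back to the peer. */
            return (u64)atomic64_add_return(swap_or_add, maddr) - swap_or_add;
        return (u64)cmpxchg(vaddr, compare, swap_or_add);
    }

The same pair reappears in qib_ruc_loopback() in qib_ruc.c below, where both QPs live on the local node.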
qib_uc.c:

42    * @qp: a pointer to the QP
46   int qib_make_uc_req(struct qib_qp *qp)
54       u32 pmtu = qp->pmtu;
57       spin_lock_irqsave(&qp->s_lock, flags);
59       if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
60           if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
63           if (qp->s_last == qp->s_head)
66           if (atomic_read(&qp->s_dma_busy)) {
67               qp->s_flags |= QIB_S_WAIT_DMA;
70           wqe = get_swqe_ptr(qp, qp->s_last);
71           qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
75       ohdr = &qp->s_hdr->u.oth;
76       if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
77           ohdr = &qp->s_hdr->u.l.oth;
84       wqe = get_swqe_ptr(qp, qp->s_cur);
85       qp->s_wqe = NULL;
86       switch (qp->s_state) {
88           if (!(ib_qib_state_ops[qp->state] &
92           if (qp->s_cur == qp->s_head)
97           wqe->psn = qp->s_next_psn;
98           qp->s_psn = qp->s_next_psn;
99           qp->s_sge.sge = wqe->sg_list[0];
100          qp->s_sge.sg_list = wqe->sg_list + 1;
101          qp->s_sge.num_sge = wqe->wr.num_sge;
102          qp->s_sge.total_len = wqe->length;
104          qp->s_len = len;
109              qp->s_state = OP(SEND_FIRST);
114              qp->s_state = OP(SEND_ONLY);
116              qp->s_state =
124              qp->s_wqe = wqe;
125              if (++qp->s_cur >= qp->s_size)
126                  qp->s_cur = 0;
138              qp->s_state = OP(RDMA_WRITE_FIRST);
143              qp->s_state = OP(RDMA_WRITE_ONLY);
145              qp->s_state =
153              qp->s_wqe = wqe;
154              if (++qp->s_cur >= qp->s_size)
155                  qp->s_cur = 0;
164          qp->s_state = OP(SEND_MIDDLE);
167          len = qp->s_len;
173              qp->s_state = OP(SEND_LAST);
175              qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
182          qp->s_wqe = wqe;
183          if (++qp->s_cur >= qp->s_size)
184              qp->s_cur = 0;
188          qp->s_state = OP(RDMA_WRITE_MIDDLE);
191          len = qp->s_len;
197              qp->s_state = OP(RDMA_WRITE_LAST);
199              qp->s_state =
207          qp->s_wqe = wqe;
208          if (++qp->s_cur >= qp->s_size)
209              qp->s_cur = 0;
212      qp->s_len -= len;
213      qp->s_hdrwords = hwords;
214      qp->s_cur_sge = &qp->s_sge;
215      qp->s_cur_size = len;
216      qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
217          qp->s_next_psn++ & QIB_PSN_MASK);
223      qp->s_flags &= ~QIB_S_BUSY;
225      spin_unlock_irqrestore(&qp->s_lock, flags);

236   * @qp: the QP for this packet.
242  qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
243      int has_grh, void *data, u32 tlen, struct qib_qp *qp)
251      u32 pmtu = qp->pmtu;
265      if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
272      if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
277          qp->r_psn = psn;
279          if (qp->r_state == OP(SEND_FIRST) ||
280              qp->r_state == OP(SEND_MIDDLE)) {
281              set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
282              qp->r_sge.num_sge = 0;
284              qib_put_ss(&qp->r_sge);
285          qp->r_state = OP(SEND_LAST);
303      switch (qp->r_state) {
331      if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
332          qp->r_flags |= QIB_R_COMM_EST;
333          if (qp->ibqp.event_handler) {
336              ev.device = qp->ibqp.device;
337              ev.element.qp = &qp->ibqp;
339              qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
349          if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
350              qp->r_sge = qp->s_rdma_read_sge;
352              ret = qib_get_rwqe(qp, 0);
358               * qp->s_rdma_read_sge will be the owner
361              qp->s_rdma_read_sge = qp->r_sge;
363          qp->r_rcv_len = 0;
373          qp->r_rcv_len += pmtu;
374          if (unlikely(qp->r_rcv_len > qp->r_len))
376          qib_copy_sge(&qp->r_sge, data, pmtu, 0);
398          wc.byte_len = tlen + qp->r_rcv_len;
399          if (unlikely(wc.byte_len > qp->r_len))
402          qib_copy_sge(&qp->r_sge, data, tlen, 0);
403          qib_put_ss(&qp->s_rdma_read_sge);
405          wc.wr_id = qp->r_wr_id;
407          wc.qp = &qp->ibqp;
408          wc.src_qp = qp->remote_qpn;
409          wc.slid = qp->remote_ah_attr.dlid;
410          wc.sl = qp->remote_ah_attr.sl;
417          qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
426          if (unlikely(!(qp->qp_access_flags &
432          qp->r_len = be32_to_cpu(reth->length);
433          qp->r_rcv_len = 0;
434          qp->r_sge.sg_list = NULL;
435          if (qp->r_len != 0) {
441              ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
445              qp->r_sge.num_sge = 1;
447              qp->r_sge.num_sge = 0;
448              qp->r_sge.sge.mr = NULL;
449              qp->r_sge.sge.vaddr = NULL;
450              qp->r_sge.sge.length = 0;
451              qp->r_sge.sge.sge_length = 0;
464          qp->r_rcv_len += pmtu;
465          if (unlikely(qp->r_rcv_len > qp->r_len))
467          qib_copy_sge(&qp->r_sge, data, pmtu, 1);
484          if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
486          if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
487              qib_put_ss(&qp->s_rdma_read_sge);
489              ret = qib_get_rwqe(qp, 1);
495          wc.byte_len = qp->r_len;
497          qib_copy_sge(&qp->r_sge, data, tlen, 1);
498          qib_put_ss(&qp->r_sge);
511          if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
513          qib_copy_sge(&qp->r_sge, data, tlen, 1);
514          qib_put_ss(&qp->r_sge);
521      qp->r_psn++;
522      qp->r_state = opcode;
526      set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
527      qp->r_sge.num_sge = 0;
533      qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
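Both qib_make_uc_req() (line 216) and qib_make_rc_req() pack the current send-state opcode into the top byte of BTH dword 0 with bth0 | (qp->s_state << 24), and the receive paths recover it from the same position. A trivial sketch of the packing and its inverse (field position per the IBTA BTH layout; demo names are mine):

    /* BTH dword 0: opcode occupies bits 31..24; P_Key and flags sit
     * in the low bits, as in qib_send_rc_ack() at line 685. */
    static u32 demo_pack_bth0(u32 bth0_low, u8 opcode)
    {
        return bth0_low | ((u32)opcode << 24);
    }

    static u8 demo_bth0_opcode(u32 bth0)
    {
        return bth0 >> 24;   /* receiver recovers the opcode */
    }

Note the contrast with RC at lines 272-285: UC has no ACK/NAK machinery, so on a PSN gap the receiver simply rewinds any partial message and resynchronizes its r_psn to the incoming PSN instead of requesting a retransmit.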
|
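The qib_make_uc_req() listing above leans on two idioms that recur through the rest of these files: circular send-queue indices that wrap at qp->s_size (the "if (++qp->s_cur >= qp->s_size) qp->s_cur = 0;" pattern) and post-incremented PSNs masked to 24 bits before being written into the BTH. A minimal stand-alone sketch of both, with illustrative helper names (advance_ring and next_psn are not driver functions):

#include <stdint.h>

#define PSN_MASK 0xFFFFFF  /* PSNs are 24 bits on the wire (QIB_PSN_MASK) */

/* Equivalent of: if (++cur >= size) cur = 0; */
static inline uint32_t advance_ring(uint32_t cur, uint32_t size)
{
        return (cur + 1 >= size) ? 0 : cur + 1;
}

/* Equivalent of: qp->s_next_psn++ & QIB_PSN_MASK -- use the current
 * PSN for this packet, then advance, keeping only the low 24 bits. */
static inline uint32_t next_psn(uint32_t *psn)
{
        return (*psn)++ & PSN_MASK;
}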
H A D | qib_ruc.c | 81 static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) qib_init_sge() argument 89 rkt = &to_idev(qp->ibqp.device)->lk_table; qib_init_sge() 90 pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); qib_init_sge() 91 ss = &qp->r_sge; qib_init_sge() 92 ss->sg_list = qp->r_sg_list; qib_init_sge() 93 qp->r_len = 0; qib_init_sge() 101 qp->r_len += wqe->sg_list[i].length; qib_init_sge() 105 ss->total_len = qp->r_len; qib_init_sge() 120 wc.qp = &qp->ibqp; qib_init_sge() 122 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); qib_init_sge() 130 * @qp: the QP 131 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge 138 int qib_get_rwqe(struct qib_qp *qp, int wr_id_only) qib_get_rwqe() argument 149 if (qp->ibqp.srq) { qib_get_rwqe() 150 srq = to_isrq(qp->ibqp.srq); qib_get_rwqe() 156 rq = &qp->r_rq; qib_get_rwqe() 160 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { qib_get_rwqe() 185 if (!wr_id_only && !qib_init_sge(qp, wqe)) { qib_get_rwqe() 189 qp->r_wr_id = wqe->wr_id; qib_get_rwqe() 192 set_bit(QIB_R_WRID_VALID, &qp->r_aflags); qib_get_rwqe() 212 ev.device = qp->ibqp.device; qib_get_rwqe() 213 ev.element.srq = qp->ibqp.srq; qib_get_rwqe() 229 void qib_migrate_qp(struct qib_qp *qp) qib_migrate_qp() argument 233 qp->s_mig_state = IB_MIG_MIGRATED; qib_migrate_qp() 234 qp->remote_ah_attr = qp->alt_ah_attr; qib_migrate_qp() 235 qp->port_num = qp->alt_ah_attr.port_num; qib_migrate_qp() 236 qp->s_pkey_index = qp->s_alt_pkey_index; qib_migrate_qp() 238 ev.device = qp->ibqp.device; qib_migrate_qp() 239 ev.element.qp = &qp->ibqp; qib_migrate_qp() 241 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); qib_migrate_qp() 268 int has_grh, struct qib_qp *qp, u32 bth0) qib_ruc_check_hdr() 273 if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) { qib_ruc_check_hdr() 275 if (qp->alt_ah_attr.ah_flags & IB_AH_GRH) qib_ruc_check_hdr() 278 if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH)) qib_ruc_check_hdr() 280 guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index); qib_ruc_check_hdr() 284 qp->alt_ah_attr.grh.dgid.global.subnet_prefix, qib_ruc_check_hdr() 285 qp->alt_ah_attr.grh.dgid.global.interface_id)) qib_ruc_check_hdr() 289 qib_get_pkey(ibp, qp->s_alt_pkey_index))) { qib_ruc_check_hdr() 293 0, qp->ibqp.qp_num, qib_ruc_check_hdr() 298 if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid || qib_ruc_check_hdr() 299 ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num) qib_ruc_check_hdr() 301 spin_lock_irqsave(&qp->s_lock, flags); qib_ruc_check_hdr() 302 qib_migrate_qp(qp); qib_ruc_check_hdr() 303 spin_unlock_irqrestore(&qp->s_lock, flags); qib_ruc_check_hdr() 306 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) qib_ruc_check_hdr() 309 if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) qib_ruc_check_hdr() 312 qp->remote_ah_attr.grh.sgid_index); qib_ruc_check_hdr() 316 qp->remote_ah_attr.grh.dgid.global.subnet_prefix, qib_ruc_check_hdr() 317 qp->remote_ah_attr.grh.dgid.global.interface_id)) qib_ruc_check_hdr() 321 qib_get_pkey(ibp, qp->s_pkey_index))) { qib_ruc_check_hdr() 325 0, qp->ibqp.qp_num, qib_ruc_check_hdr() 330 if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid || qib_ruc_check_hdr() 331 ppd_from_ibp(ibp)->port != qp->port_num) qib_ruc_check_hdr() 333 if (qp->s_mig_state == IB_MIG_REARM && qib_ruc_check_hdr() 335 qp->s_mig_state = IB_MIG_ARMED; qib_ruc_check_hdr() 358 struct qib_qp *qp; qib_ruc_loopback() local 373 qp = qib_lookup_qpn(ibp, sqp->remote_qpn); qib_ruc_loopback() 409 if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) || 
qib_ruc_loopback() 410 qp->ibqp.qp_type != sqp->ibqp.qp_type) { qib_ruc_loopback() 437 ret = qib_get_rwqe(qp, 0); qib_ruc_loopback() 445 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) qib_ruc_loopback() 449 ret = qib_get_rwqe(qp, 1); qib_ruc_loopback() 456 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) qib_ruc_loopback() 460 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length, qib_ruc_loopback() 465 qp->r_sge.sg_list = NULL; qib_ruc_loopback() 466 qp->r_sge.num_sge = 1; qib_ruc_loopback() 467 qp->r_sge.total_len = wqe->length; qib_ruc_loopback() 471 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) qib_ruc_loopback() 473 if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, qib_ruc_loopback() 481 qp->r_sge.sge = wqe->sg_list[0]; qib_ruc_loopback() 482 qp->r_sge.sg_list = wqe->sg_list + 1; qib_ruc_loopback() 483 qp->r_sge.num_sge = wqe->wr.num_sge; qib_ruc_loopback() 484 qp->r_sge.total_len = wqe->length; qib_ruc_loopback() 489 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) qib_ruc_loopback() 491 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), qib_ruc_loopback() 497 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; qib_ruc_loopback() 502 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, qib_ruc_loopback() 504 qib_put_mr(qp->r_sge.sge.mr); qib_ruc_loopback() 505 qp->r_sge.num_sge = 0; qib_ruc_loopback() 522 qib_copy_sge(&qp->r_sge, sge->vaddr, len, release); qib_ruc_loopback() 545 qib_put_ss(&qp->r_sge); qib_ruc_loopback() 547 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) qib_ruc_loopback() 554 wc.wr_id = qp->r_wr_id; qib_ruc_loopback() 557 wc.qp = &qp->ibqp; qib_ruc_loopback() 558 wc.src_qp = qp->remote_qpn; qib_ruc_loopback() 559 wc.slid = qp->remote_ah_attr.dlid; qib_ruc_loopback() 560 wc.sl = qp->remote_ah_attr.sl; qib_ruc_loopback() 563 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, qib_ruc_loopback() 576 if (qp->ibqp.qp_type == IB_QPT_UC) qib_ruc_loopback() 595 usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]); qib_ruc_loopback() 614 qib_rc_error(qp, wc.status); qib_ruc_loopback() 628 ev.element.qp = &sqp->ibqp; qib_ruc_loopback() 639 if (qp && atomic_dec_and_test(&qp->refcount)) qib_ruc_loopback() 640 wake_up(&qp->wait); qib_ruc_loopback() 674 void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, qib_make_ruc_header() argument 677 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); qib_make_ruc_header() 683 extra_bytes = -qp->s_cur_size & 3; qib_make_ruc_header() 684 nwords = (qp->s_cur_size + extra_bytes) >> 2; qib_make_ruc_header() 686 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { qib_make_ruc_header() 687 qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh, qib_make_ruc_header() 688 &qp->remote_ah_attr.grh, qib_make_ruc_header() 689 qp->s_hdrwords, nwords); qib_make_ruc_header() 692 lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | qib_make_ruc_header() 693 qp->remote_ah_attr.sl << 4; qib_make_ruc_header() 694 qp->s_hdr->lrh[0] = cpu_to_be16(lrh0); qib_make_ruc_header() 695 qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); qib_make_ruc_header() 696 qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); qib_make_ruc_header() 697 qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | qib_make_ruc_header() 698 qp->remote_ah_attr.src_path_bits); qib_make_ruc_header() 699 bth0 |= qib_get_pkey(ibp, qp->s_pkey_index); qib_make_ruc_header() 701 if (qp->s_mig_state == IB_MIG_MIGRATED) qib_make_ruc_header() 704 ohdr->bth[1] = 
cpu_to_be32(qp->remote_qpn); qib_make_ruc_header() 719 struct qib_qp *qp = container_of(work, struct qib_qp, s_work); qib_do_send() local 720 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); qib_do_send() 722 int (*make_req)(struct qib_qp *qp); qib_do_send() 725 if ((qp->ibqp.qp_type == IB_QPT_RC || qib_do_send() 726 qp->ibqp.qp_type == IB_QPT_UC) && qib_do_send() 727 (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) { qib_do_send() 728 qib_ruc_loopback(qp); qib_do_send() 732 if (qp->ibqp.qp_type == IB_QPT_RC) qib_do_send() 734 else if (qp->ibqp.qp_type == IB_QPT_UC) qib_do_send() 739 spin_lock_irqsave(&qp->s_lock, flags); qib_do_send() 742 if (!qib_send_ok(qp)) { qib_do_send() 743 spin_unlock_irqrestore(&qp->s_lock, flags); qib_do_send() 747 qp->s_flags |= QIB_S_BUSY; qib_do_send() 749 spin_unlock_irqrestore(&qp->s_lock, flags); qib_do_send() 753 if (qp->s_hdrwords != 0) { qib_do_send() 758 if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords, qib_do_send() 759 qp->s_cur_sge, qp->s_cur_size)) qib_do_send() 762 qp->s_hdrwords = 0; qib_do_send() 764 } while (make_req(qp)); qib_do_send() 770 void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, qib_send_complete() argument 776 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) qib_send_complete() 784 if (qp->ibqp.qp_type == IB_QPT_UD || qib_send_complete() 785 qp->ibqp.qp_type == IB_QPT_SMI || qib_send_complete() 786 qp->ibqp.qp_type == IB_QPT_GSI) qib_send_complete() 790 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || qib_send_complete() 799 wc.qp = &qp->ibqp; qib_send_complete() 802 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, qib_send_complete() 806 last = qp->s_last; qib_send_complete() 808 if (++last >= qp->s_size) qib_send_complete() 810 qp->s_last = last; qib_send_complete() 811 if (qp->s_acked == old_last) qib_send_complete() 812 qp->s_acked = last; qib_send_complete() 813 if (qp->s_cur == old_last) qib_send_complete() 814 qp->s_cur = last; qib_send_complete() 815 if (qp->s_tail == old_last) qib_send_complete() 816 qp->s_tail = last; qib_send_complete() 817 if (qp->state == IB_QPS_SQD && last == qp->s_cur) qib_send_complete() 818 qp->s_draining = 0; qib_send_complete() 267 qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, struct qib_qp *qp, u32 bth0) qib_ruc_check_hdr() argument
|
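qib_make_ruc_header() above computes "extra_bytes = -qp->s_cur_size & 3" and "nwords = (qp->s_cur_size + extra_bytes) >> 2": payloads are padded out to a 4-byte boundary and the LRH carries the packet length in 32-bit words (header words + payload words + SIZE_OF_CRC). A stand-alone sketch of that negate-and-mask rounding, assuming unsigned arithmetic; payload_words() is an illustrative name:

#include <assert.h>
#include <stdint.h>

static uint32_t payload_words(uint32_t nbytes)
{
        uint32_t extra = -nbytes & 3;   /* pad bytes up to a multiple of 4 */

        return (nbytes + extra) >> 2;   /* payload length in 32-bit words */
}

int main(void)
{
        assert(payload_words(5) == 2);  /* 5 + 3 pad = 8 bytes = 2 words */
        assert(payload_words(8) == 2);  /* already aligned: no padding */
        return 0;
}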
H A D | qib_ud.c | 53 struct qib_qp *qp; qib_ud_loopback() local 62 qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn); qib_ud_loopback() 63 if (!qp) { qib_ud_loopback() 70 dqptype = qp->ibqp.qp_type == IB_QPT_GSI ? qib_ud_loopback() 71 IB_QPT_UD : qp->ibqp.qp_type; qib_ud_loopback() 74 !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { qib_ud_loopback() 82 if (qp->ibqp.qp_num > 1) { qib_ud_loopback() 88 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); qib_ud_loopback() 94 sqp->ibqp.qp_num, qp->ibqp.qp_num, qib_ud_loopback() 106 if (qp->ibqp.qp_num) { qib_ud_loopback() 111 if (unlikely(qkey != qp->qkey)) { qib_ud_loopback() 118 sqp->ibqp.qp_num, qp->ibqp.qp_num, qib_ud_loopback() 138 spin_lock_irqsave(&qp->r_lock, flags); qib_ud_loopback() 143 if (qp->r_flags & QIB_R_REUSE_SGE) qib_ud_loopback() 144 qp->r_flags &= ~QIB_R_REUSE_SGE; qib_ud_loopback() 148 ret = qib_get_rwqe(qp, 0); qib_ud_loopback() 150 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); qib_ud_loopback() 154 if (qp->ibqp.qp_num == 0) qib_ud_loopback() 160 if (unlikely(wc.byte_len > qp->r_len)) { qib_ud_loopback() 161 qp->r_flags |= QIB_R_REUSE_SGE; qib_ud_loopback() 167 qib_copy_sge(&qp->r_sge, &ah_attr->grh, qib_ud_loopback() 171 qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); qib_ud_loopback() 184 qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1); qib_ud_loopback() 204 qib_put_ss(&qp->r_sge); qib_ud_loopback() 205 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) qib_ud_loopback() 207 wc.wr_id = qp->r_wr_id; qib_ud_loopback() 210 wc.qp = &qp->ibqp; qib_ud_loopback() 212 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? qib_ud_loopback() 217 wc.port_num = qp->port_num; qib_ud_loopback() 219 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, qib_ud_loopback() 223 spin_unlock_irqrestore(&qp->r_lock, flags); qib_ud_loopback() 225 if (atomic_dec_and_test(&qp->refcount)) qib_ud_loopback() 226 wake_up(&qp->wait); qib_ud_loopback() 231 * @qp: the QP 235 int qib_make_ud_req(struct qib_qp *qp) qib_make_ud_req() argument 251 spin_lock_irqsave(&qp->s_lock, flags); qib_make_ud_req() 253 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) { qib_make_ud_req() 254 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) qib_make_ud_req() 257 if (qp->s_last == qp->s_head) qib_make_ud_req() 260 if (atomic_read(&qp->s_dma_busy)) { qib_make_ud_req() 261 qp->s_flags |= QIB_S_WAIT_DMA; qib_make_ud_req() 264 wqe = get_swqe_ptr(qp, qp->s_last); qib_make_ud_req() 265 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); qib_make_ud_req() 269 if (qp->s_cur == qp->s_head) qib_make_ud_req() 272 wqe = get_swqe_ptr(qp, qp->s_cur); qib_make_ud_req() 273 next_cur = qp->s_cur + 1; qib_make_ud_req() 274 if (next_cur >= qp->s_size) qib_make_ud_req() 278 ibp = to_iport(qp->ibqp.device, qp->port_num); qib_make_ud_req() 297 if (atomic_read(&qp->s_dma_busy)) { qib_make_ud_req() 298 qp->s_flags |= QIB_S_WAIT_DMA; qib_make_ud_req() 301 qp->s_cur = next_cur; qib_make_ud_req() 302 spin_unlock_irqrestore(&qp->s_lock, flags); qib_make_ud_req() 303 qib_ud_loopback(qp, wqe); qib_make_ud_req() 304 spin_lock_irqsave(&qp->s_lock, flags); qib_make_ud_req() 305 qib_send_complete(qp, wqe, IB_WC_SUCCESS); qib_make_ud_req() 310 qp->s_cur = next_cur; qib_make_ud_req() 315 qp->s_hdrwords = 7; qib_make_ud_req() 316 qp->s_cur_size = wqe->length; qib_make_ud_req() 317 qp->s_cur_sge = &qp->s_sge; qib_make_ud_req() 318 qp->s_srate = ah_attr->static_rate; qib_make_ud_req() 319 qp->s_wqe = wqe; qib_make_ud_req() 320 qp->s_sge.sge = wqe->sg_list[0]; qib_make_ud_req() 321 qp->s_sge.sg_list = 
wqe->sg_list + 1; qib_make_ud_req() 322 qp->s_sge.num_sge = wqe->wr.num_sge; qib_make_ud_req() 323 qp->s_sge.total_len = wqe->length; qib_make_ud_req() 327 qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh, qib_make_ud_req() 329 qp->s_hdrwords, nwords); qib_make_ud_req() 331 ohdr = &qp->s_hdr->u.l.oth; qib_make_ud_req() 339 ohdr = &qp->s_hdr->u.oth; qib_make_ud_req() 342 qp->s_hdrwords++; qib_make_ud_req() 348 if (qp->ibqp.qp_type == IB_QPT_SMI) qib_make_ud_req() 352 qp->s_hdr->lrh[0] = cpu_to_be16(lrh0); qib_make_ud_req() 353 qp->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ qib_make_ud_req() 354 qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); qib_make_ud_req() 358 qp->s_hdr->lrh[3] = cpu_to_be16(lid); qib_make_ud_req() 360 qp->s_hdr->lrh[3] = IB_LID_PERMISSIVE; qib_make_ud_req() 364 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY : qib_make_ud_req() 365 qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ? qib_make_ud_req() 366 wqe->wr.wr.ud.pkey_index : qp->s_pkey_index); qib_make_ud_req() 375 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK); qib_make_ud_req() 381 qp->qkey : wqe->wr.wr.ud.remote_qkey); qib_make_ud_req() 382 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); qib_make_ud_req() 389 qp->s_flags &= ~QIB_S_BUSY; qib_make_ud_req() 391 spin_unlock_irqrestore(&qp->s_lock, flags); qib_make_ud_req() 422 * @qp: the QP the packet came on 429 int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_ud_rcv() 465 if (qp->ibqp.qp_num) { qib_ud_rcv() 469 if (qp->ibqp.qp_num > 1) { qib_ud_rcv() 473 pkey2 = qib_get_pkey(ibp, qp->s_pkey_index); qib_ud_rcv() 479 src_qp, qp->ibqp.qp_num, qib_ud_rcv() 484 if (unlikely(qkey != qp->qkey)) { qib_ud_rcv() 487 src_qp, qp->ibqp.qp_num, qib_ud_rcv() 492 if (unlikely(qp->ibqp.qp_num == 1 && qib_ud_rcv() 514 if (qp->ibqp.qp_num > 1 && qib_ud_rcv() 534 if (qp->r_flags & QIB_R_REUSE_SGE) qib_ud_rcv() 535 qp->r_flags &= ~QIB_R_REUSE_SGE; qib_ud_rcv() 539 ret = qib_get_rwqe(qp, 0); qib_ud_rcv() 541 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); qib_ud_rcv() 545 if (qp->ibqp.qp_num == 0) qib_ud_rcv() 551 if (unlikely(wc.byte_len > qp->r_len)) { qib_ud_rcv() 552 qp->r_flags |= QIB_R_REUSE_SGE; qib_ud_rcv() 556 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, qib_ud_rcv() 560 qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); qib_ud_rcv() 561 qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); qib_ud_rcv() 562 qib_put_ss(&qp->r_sge); qib_ud_rcv() 563 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) qib_ud_rcv() 565 wc.wr_id = qp->r_wr_id; qib_ud_rcv() 569 wc.qp = &qp->ibqp; qib_ud_rcv() 571 wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ? qib_ud_rcv() 581 wc.port_num = qp->port_num; qib_ud_rcv() 583 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, qib_ud_rcv() 428 qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_ud_rcv() argument
|
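The Q_Key handling above (the "qkey != qp->qkey" checks in qib_ud_loopback() and qib_ud_rcv(), and the "qp->qkey : wqe->wr.wr.ud.remote_qkey" selection in qib_make_ud_req()) follows the IBTA convention that a work request whose Q_Key has the most significant bit set means "send the QP's own Q_Key instead". A one-function sketch of that rule (effective_qkey is an illustrative name):

#include <stdint.h>

static uint32_t effective_qkey(uint32_t wr_qkey, uint32_t qp_qkey)
{
        /* Bit 31 set in the WR's Q_Key selects the QP's configured Q_Key. */
        return (wr_qkey & 0x80000000u) ? qp_qkey : wr_qkey;
}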
H A D | qib_verbs.c | 334 * @qp: the QP to post on 337 static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, qib_post_one_send() argument 350 spin_lock_irqsave(&qp->s_lock, flags); qib_post_one_send() 353 if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))) qib_post_one_send() 357 if (wr->num_sge > qp->s_max_sge) qib_post_one_send() 366 if (qib_fast_reg_mr(qp, wr)) qib_post_one_send() 368 } else if (qp->ibqp.qp_type == IB_QPT_UC) { qib_post_one_send() 371 } else if (qp->ibqp.qp_type != IB_QPT_RC) { qib_post_one_send() 377 if (qp->ibqp.pd != wr->wr.ud.ah->pd) qib_post_one_send() 386 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) qib_post_one_send() 389 next = qp->s_head + 1; qib_post_one_send() 390 if (next >= qp->s_size) qib_post_one_send() 392 if (next == qp->s_last) { qib_post_one_send() 397 rkt = &to_idev(qp->ibqp.device)->lk_table; qib_post_one_send() 398 pd = to_ipd(qp->ibqp.pd); qib_post_one_send() 399 wqe = get_swqe_ptr(qp, qp->s_head); qib_post_one_send() 421 if (qp->ibqp.qp_type == IB_QPT_UC || qib_post_one_send() 422 qp->ibqp.qp_type == IB_QPT_RC) { qib_post_one_send() 425 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport + qib_post_one_send() 426 qp->port_num - 1)->ibmtu) qib_post_one_send() 430 wqe->ssn = qp->s_ssn++; qib_post_one_send() 431 qp->s_head = next; qib_post_one_send() 447 dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) { qib_post_one_send() 448 qib_schedule_send(qp); qib_post_one_send() 451 spin_unlock_irqrestore(&qp->s_lock, flags); qib_post_one_send() 466 struct qib_qp *qp = to_iqp(ibqp); qib_post_send() local 471 err = qib_post_one_send(qp, wr, &scheduled); qib_post_send() 480 qib_do_send(&qp->s_work); qib_post_send() 497 struct qib_qp *qp = to_iqp(ibqp); qib_post_receive() local 498 struct qib_rwq *wq = qp->r_rq.wq; qib_post_receive() 503 if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) { qib_post_receive() 514 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { qib_post_receive() 520 spin_lock_irqsave(&qp->r_rq.lock, flags); qib_post_receive() 522 if (next >= qp->r_rq.size) qib_post_receive() 525 spin_unlock_irqrestore(&qp->r_rq.lock, flags); qib_post_receive() 531 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); qib_post_receive() 539 spin_unlock_irqrestore(&qp->r_rq.lock, flags); qib_post_receive() 554 * @qp: the QP the packet came on 561 int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_qp_rcv() 565 spin_lock(&qp->r_lock); qib_qp_rcv() 568 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { qib_qp_rcv() 573 switch (qp->ibqp.qp_type) { qib_qp_rcv() 580 qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp); qib_qp_rcv() 584 qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp); qib_qp_rcv() 588 qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp); qib_qp_rcv() 596 spin_unlock(&qp->r_lock); qib_qp_rcv() 615 struct qib_qp *qp; qib_ib_rcv() local 668 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); qib_ib_rcv() 686 qp = qib_lookup_qpn(ibp, qp_num); qib_ib_rcv() 687 if (!qp) qib_ib_rcv() 689 rcd->lookaside_qp = qp; qib_ib_rcv() 692 qp = rcd->lookaside_qp; qib_ib_rcv() 694 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); qib_ib_rcv() 710 struct qib_qp *qp = NULL; mem_timer() local 715 qp = list_entry(list->next, struct qib_qp, iowait); mem_timer() 716 list_del_init(&qp->iowait); mem_timer() 717 atomic_inc(&qp->refcount); mem_timer() 723 if (qp) { mem_timer() 724 spin_lock_irqsave(&qp->s_lock, flags); mem_timer() 725 if (qp->s_flags & QIB_S_WAIT_KMEM) { mem_timer() 726 qp->s_flags &= 
~QIB_S_WAIT_KMEM; mem_timer() 727 qib_schedule_send(qp); mem_timer() 729 spin_unlock_irqrestore(&qp->s_lock, flags); mem_timer() 730 if (atomic_dec_and_test(&qp->refcount)) mem_timer() 731 wake_up(&qp->wait); mem_timer() 929 struct qib_qp *qp) __get_txreq() 934 spin_lock_irqsave(&qp->s_lock, flags); __get_txreq() 942 spin_unlock_irqrestore(&qp->s_lock, flags); __get_txreq() 945 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK && __get_txreq() 946 list_empty(&qp->iowait)) { __get_txreq() 948 qp->s_flags |= QIB_S_WAIT_TX; __get_txreq() 949 list_add_tail(&qp->iowait, &dev->txwait); __get_txreq() 951 qp->s_flags &= ~QIB_S_BUSY; __get_txreq() 953 spin_unlock_irqrestore(&qp->s_lock, flags); __get_txreq() 960 struct qib_qp *qp) get_txreq() 976 tx = __get_txreq(dev, qp); get_txreq() 984 struct qib_qp *qp; qib_put_txreq() local 987 qp = tx->qp; qib_put_txreq() 988 dev = to_idev(qp->ibqp.device); qib_put_txreq() 990 if (atomic_dec_and_test(&qp->refcount)) qib_put_txreq() 991 wake_up(&qp->wait); qib_put_txreq() 1011 qp = list_entry(dev->txwait.next, struct qib_qp, iowait); qib_put_txreq() 1012 list_del_init(&qp->iowait); qib_put_txreq() 1013 atomic_inc(&qp->refcount); qib_put_txreq() 1016 spin_lock_irqsave(&qp->s_lock, flags); qib_put_txreq() 1017 if (qp->s_flags & QIB_S_WAIT_TX) { qib_put_txreq() 1018 qp->s_flags &= ~QIB_S_WAIT_TX; qib_put_txreq() 1019 qib_schedule_send(qp); qib_put_txreq() 1021 spin_unlock_irqrestore(&qp->s_lock, flags); qib_put_txreq() 1023 if (atomic_dec_and_test(&qp->refcount)) qib_put_txreq() 1024 wake_up(&qp->wait); qib_put_txreq() 1037 struct qib_qp *qp, *nqp; qib_verbs_sdma_desc_avail() local 1047 list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) { qib_verbs_sdma_desc_avail() 1048 if (qp->port_num != ppd->port) qib_verbs_sdma_desc_avail() 1052 if (qp->s_tx->txreq.sg_count > avail) qib_verbs_sdma_desc_avail() 1054 avail -= qp->s_tx->txreq.sg_count; qib_verbs_sdma_desc_avail() 1055 list_del_init(&qp->iowait); qib_verbs_sdma_desc_avail() 1056 atomic_inc(&qp->refcount); qib_verbs_sdma_desc_avail() 1057 qps[n++] = qp; qib_verbs_sdma_desc_avail() 1063 qp = qps[i]; qib_verbs_sdma_desc_avail() 1064 spin_lock(&qp->s_lock); qib_verbs_sdma_desc_avail() 1065 if (qp->s_flags & QIB_S_WAIT_DMA_DESC) { qib_verbs_sdma_desc_avail() 1066 qp->s_flags &= ~QIB_S_WAIT_DMA_DESC; qib_verbs_sdma_desc_avail() 1067 qib_schedule_send(qp); qib_verbs_sdma_desc_avail() 1069 spin_unlock(&qp->s_lock); qib_verbs_sdma_desc_avail() 1070 if (atomic_dec_and_test(&qp->refcount)) qib_verbs_sdma_desc_avail() 1071 wake_up(&qp->wait); qib_verbs_sdma_desc_avail() 1082 struct qib_qp *qp = tx->qp; sdma_complete() local 1084 spin_lock(&qp->s_lock); sdma_complete() 1086 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS); sdma_complete() 1087 else if (qp->ibqp.qp_type == IB_QPT_RC) { sdma_complete() 1093 struct qib_ibdev *dev = to_idev(qp->ibqp.device); sdma_complete() 1097 qib_rc_send_complete(qp, hdr); sdma_complete() 1099 if (atomic_dec_and_test(&qp->s_dma_busy)) { sdma_complete() 1100 if (qp->state == IB_QPS_RESET) sdma_complete() 1101 wake_up(&qp->wait_dma); sdma_complete() 1102 else if (qp->s_flags & QIB_S_WAIT_DMA) { sdma_complete() 1103 qp->s_flags &= ~QIB_S_WAIT_DMA; sdma_complete() 1104 qib_schedule_send(qp); sdma_complete() 1107 spin_unlock(&qp->s_lock); sdma_complete() 1112 static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp) wait_kmem() argument 1117 spin_lock_irqsave(&qp->s_lock, flags); wait_kmem() 1118 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { wait_kmem() 1120 if 
(list_empty(&qp->iowait)) { wait_kmem() 1123 qp->s_flags |= QIB_S_WAIT_KMEM; wait_kmem() 1124 list_add_tail(&qp->iowait, &dev->memwait); wait_kmem() 1127 qp->s_flags &= ~QIB_S_BUSY; wait_kmem() 1130 spin_unlock_irqrestore(&qp->s_lock, flags); wait_kmem() 1135 static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, qib_verbs_send_dma() argument 1139 struct qib_ibdev *dev = to_idev(qp->ibqp.device); qib_verbs_send_dma() 1141 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); qib_verbs_send_dma() 1149 tx = qp->s_tx; qib_verbs_send_dma() 1151 qp->s_tx = NULL; qib_verbs_send_dma() 1157 tx = get_txreq(dev, qp); qib_verbs_send_dma() 1161 control = dd->f_setpbc_control(ppd, plen, qp->s_srate, qib_verbs_send_dma() 1163 tx->qp = qp; qib_verbs_send_dma() 1164 atomic_inc(&qp->refcount); qib_verbs_send_dma() 1165 tx->wqe = qp->s_wqe; qib_verbs_send_dma() 1166 tx->mr = qp->s_rdma_mr; qib_verbs_send_dma() 1167 if (qp->s_rdma_mr) qib_verbs_send_dma() 1168 qp->s_rdma_mr = NULL; qib_verbs_send_dma() 1225 ret = wait_kmem(dev, qp); qib_verbs_send_dma() 1239 static int no_bufs_available(struct qib_qp *qp) no_bufs_available() argument 1241 struct qib_ibdev *dev = to_idev(qp->ibqp.device); no_bufs_available() 1252 spin_lock_irqsave(&qp->s_lock, flags); no_bufs_available() 1253 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { no_bufs_available() 1255 if (list_empty(&qp->iowait)) { no_bufs_available() 1257 qp->s_flags |= QIB_S_WAIT_PIO; no_bufs_available() 1258 list_add_tail(&qp->iowait, &dev->piowait); no_bufs_available() 1263 qp->s_flags &= ~QIB_S_BUSY; no_bufs_available() 1266 spin_unlock_irqrestore(&qp->s_lock, flags); no_bufs_available() 1270 static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, qib_verbs_send_pio() argument 1274 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); qib_verbs_send_pio() 1275 struct qib_pportdata *ppd = dd->pport + qp->port_num - 1; qib_verbs_send_pio() 1285 control = dd->f_setpbc_control(ppd, plen, qp->s_srate, qib_verbs_send_pio() 1290 return no_bufs_available(qp); qib_verbs_send_pio() 1351 if (qp->s_rdma_mr) { qib_verbs_send_pio() 1352 qib_put_mr(qp->s_rdma_mr); qib_verbs_send_pio() 1353 qp->s_rdma_mr = NULL; qib_verbs_send_pio() 1355 if (qp->s_wqe) { qib_verbs_send_pio() 1356 spin_lock_irqsave(&qp->s_lock, flags); qib_verbs_send_pio() 1357 qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); qib_verbs_send_pio() 1358 spin_unlock_irqrestore(&qp->s_lock, flags); qib_verbs_send_pio() 1359 } else if (qp->ibqp.qp_type == IB_QPT_RC) { qib_verbs_send_pio() 1360 spin_lock_irqsave(&qp->s_lock, flags); qib_verbs_send_pio() 1361 qib_rc_send_complete(qp, ibhdr); qib_verbs_send_pio() 1362 spin_unlock_irqrestore(&qp->s_lock, flags); qib_verbs_send_pio() 1369 * @qp: the QP to send on 1376 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise. 
1378 int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, qib_verbs_send() argument 1381 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); qib_verbs_send() 1397 if (qp->ibqp.qp_type == IB_QPT_SMI || qib_verbs_send() 1399 ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len, qib_verbs_send() 1402 ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len, qib_verbs_send() 1512 struct qib_qp *qp; qib_ib_piobufavail() local 1529 qp = list_entry(list->next, struct qib_qp, iowait); qib_ib_piobufavail() 1530 list_del_init(&qp->iowait); qib_ib_piobufavail() 1531 atomic_inc(&qp->refcount); qib_ib_piobufavail() 1532 qps[n++] = qp; qib_ib_piobufavail() 1539 qp = qps[i]; qib_ib_piobufavail() 1541 spin_lock_irqsave(&qp->s_lock, flags); qib_ib_piobufavail() 1542 if (qp->s_flags & QIB_S_WAIT_PIO) { qib_ib_piobufavail() 1543 qp->s_flags &= ~QIB_S_WAIT_PIO; qib_ib_piobufavail() 1544 qib_schedule_send(qp); qib_ib_piobufavail() 1546 spin_unlock_irqrestore(&qp->s_lock, flags); qib_ib_piobufavail() 1549 if (atomic_dec_and_test(&qp->refcount)) qib_ib_piobufavail() 1550 wake_up(&qp->wait); qib_ib_piobufavail() 2336 void qib_schedule_send(struct qib_qp *qp) qib_schedule_send() argument 2338 if (qib_send_ok(qp)) { qib_schedule_send() 2340 to_iport(qp->ibqp.device, qp->port_num); qib_schedule_send() 2343 queue_work(ppd->qib_wq, &qp->s_work); qib_schedule_send() 560 qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) qib_qp_rcv() argument 928 __get_txreq(struct qib_ibdev *dev, struct qib_qp *qp) __get_txreq() argument 959 get_txreq(struct qib_ibdev *dev, struct qib_qp *qp) get_txreq() argument
|
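qib_post_one_send() above declares the send queue full when "next == qp->s_last" after advancing qp->s_head, while an empty queue is "qp->s_last == qp->s_head": one slot is deliberately left open so full and empty stay distinguishable without a separate element count. A stand-alone sketch of that discipline (struct and function names are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct ring {
        uint32_t head;  /* next slot the producer will fill */
        uint32_t last;  /* oldest slot the consumer has not retired */
        uint32_t size;
};

static bool ring_empty(const struct ring *r)
{
        return r->head == r->last;
}

static bool ring_full(const struct ring *r)
{
        uint32_t next = r->head + 1;

        if (next >= r->size)
                next = 0;
        return next == r->last; /* would collide: leave one slot open */
}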
H A D | qib_verbs_mcast.c | 40 * @qp: the QP to link 42 static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp) qib_mcast_qp_alloc() argument 50 mqp->qp = qp; qib_mcast_qp_alloc() 51 atomic_inc(&qp->refcount); qib_mcast_qp_alloc() 59 struct qib_qp *qp = mqp->qp; qib_mcast_qp_free() local 62 if (atomic_dec_and_test(&qp->refcount)) qib_mcast_qp_free() 63 wake_up(&qp->wait); qib_mcast_qp_free() 182 if (p->qp == mqp->qp) { qib_mcast_add() 227 struct qib_qp *qp = to_iqp(ibqp); qib_multicast_attach() local 234 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { qib_multicast_attach() 248 mqp = qib_mcast_qp_alloc(qp); qib_multicast_attach() 254 ibp = to_iport(ibqp->device, qp->port_num); qib_multicast_attach() 285 struct qib_qp *qp = to_iqp(ibqp); qib_multicast_detach() local 287 struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num); qib_multicast_detach() 294 if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) qib_multicast_detach() 320 if (p->qp != qp) qib_multicast_detach()
|
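Nearly every QP lookup in these files pairs atomic_inc(&qp->refcount) with "if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait)", so that teardown can sleep until the last user drops its reference. A stand-alone sketch of that hold/put idiom using C11 atomics in place of the kernel's atomic_t; the wake-up itself is only indicated in a comment:

#include <stdatomic.h>
#include <stdbool.h>

struct qp_stub {
        atomic_int refcount;
        /* the kernel version also has: wait_queue_head_t wait; */
};

static void qp_hold(struct qp_stub *qp)
{
        atomic_fetch_add(&qp->refcount, 1);
}

/* Returns true when this put dropped the last reference; the driver
 * then does wake_up(&qp->wait) so a waiting destroyer can proceed. */
static bool qp_put(struct qp_stub *qp)
{
        return atomic_fetch_sub(&qp->refcount, 1) == 1;
}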
H A D | qib_driver.c | 309 struct qib_qp *qp = NULL; qib_rcv_hdrerr() local 353 qp = qib_lookup_qpn(ibp, qp_num); qib_rcv_hdrerr() 354 if (!qp) qib_rcv_hdrerr() 361 spin_lock(&qp->r_lock); qib_rcv_hdrerr() 364 if (!(ib_qib_state_ops[qp->state] & qib_rcv_hdrerr() 370 switch (qp->ibqp.qp_type) { qib_rcv_hdrerr() 376 qp, qib_rcv_hdrerr() 384 diff = qib_cmp24(psn, qp->r_psn); qib_rcv_hdrerr() 385 if (!qp->r_nak_state && diff >= 0) { qib_rcv_hdrerr() 387 qp->r_nak_state = qib_rcv_hdrerr() 390 qp->r_ack_psn = qp->r_psn; qib_rcv_hdrerr() 399 if (list_empty(&qp->rspwait)) { qib_rcv_hdrerr() 400 qp->r_flags |= qib_rcv_hdrerr() 403 &qp->refcount); qib_rcv_hdrerr() 405 &qp->rspwait, qib_rcv_hdrerr() 421 spin_unlock(&qp->r_lock); qib_rcv_hdrerr() 426 if (atomic_dec_and_test(&qp->refcount)) qib_rcv_hdrerr() 427 wake_up(&qp->wait); qib_rcv_hdrerr() 459 struct qib_qp *qp, *nqp; qib_kreceive() local 568 list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { qib_kreceive() 569 list_del_init(&qp->rspwait); qib_kreceive() 570 if (qp->r_flags & QIB_R_RSP_NAK) { qib_kreceive() 571 qp->r_flags &= ~QIB_R_RSP_NAK; qib_kreceive() 572 qib_send_rc_ack(qp); qib_kreceive() 574 if (qp->r_flags & QIB_R_RSP_SEND) { qib_kreceive() 577 qp->r_flags &= ~QIB_R_RSP_SEND; qib_kreceive() 578 spin_lock_irqsave(&qp->s_lock, flags); qib_kreceive() 579 if (ib_qib_state_ops[qp->state] & qib_kreceive() 581 qib_schedule_send(qp); qib_kreceive() 582 spin_unlock_irqrestore(&qp->s_lock, flags); qib_kreceive() 584 if (atomic_dec_and_test(&qp->refcount)) qib_kreceive() 585 wake_up(&qp->wait); qib_kreceive()
|
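qib_rcv_hdrerr() and qib_kreceive() above cooperate through qp->r_flags and the per-context rspwait list: the error path merely flags the QP (QIB_R_RSP_NAK or QIB_R_RSP_SEND) and queues it, and the receive loop drains the list after packet processing. A simplified sketch of that deferred-work pattern, with a singly linked list standing in for the kernel's list_head and the actual actions reduced to comments:

#include <stddef.h>

#define R_RSP_NAK  (1u << 0)    /* send a NAK when drained */
#define R_RSP_SEND (1u << 1)    /* kick the send engine when drained */

struct waiting_qp {
        unsigned int r_flags;
        int queued;
        struct waiting_qp *next;
};

/* Producer (error path): record why, enqueue the QP at most once. */
static void defer_response(struct waiting_qp **list, struct waiting_qp *qp,
                           unsigned int why)
{
        qp->r_flags |= why;
        if (!qp->queued) {
                qp->queued = 1;
                qp->next = *list;
                *list = qp;
        }
}

/* Consumer (receive loop): drain the list and act on each flag. */
static void drain_responses(struct waiting_qp **list)
{
        while (*list) {
                struct waiting_qp *qp = *list;

                *list = qp->next;
                qp->queued = 0;
                if (qp->r_flags & R_RSP_NAK)
                        ;       /* qib_send_rc_ack(qp) in the driver */
                if (qp->r_flags & R_RSP_SEND)
                        ;       /* qib_schedule_send(qp) in the driver */
                qp->r_flags &= ~(R_RSP_NAK | R_RSP_SEND);
        }
}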
H A D | qib_verbs.h | 212 struct qib_qp *qp; member in struct:qib_mcast_qp 338 * in qp->s_max_sge. 352 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). 609 static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp, get_swqe_ptr() argument 612 return (struct qib_swqe *)((char *)qp->s_wq + get_swqe_ptr() 614 qp->s_max_sge * get_swqe_ptr() 847 static inline int qib_send_ok(struct qib_qp *qp) qib_send_ok() argument 849 return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) && qib_send_ok() 850 (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) || qib_send_ok() 851 !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); qib_send_ok() 857 void qib_schedule_send(struct qib_qp *qp); 906 __be32 qib_compute_aeth(struct qib_qp *qp); 916 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err); 942 void qib_get_credit(struct qib_qp *qp, u32 aeth); 950 int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, 959 int has_grh, void *data, u32 tlen, struct qib_qp *qp); 962 int has_grh, void *data, u32 tlen, struct qib_qp *qp); 970 void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr); 972 void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err); 974 int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr); 977 int has_grh, void *data, u32 tlen, struct qib_qp *qp); 986 int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, 1041 int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr); 1087 int qib_get_rwqe(struct qib_qp *qp, int wr_id_only); 1089 void qib_migrate_qp(struct qib_qp *qp); 1092 int has_grh, struct qib_qp *qp, u32 bth0); 1097 void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, 1102 void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, 1105 void qib_send_rc_ack(struct qib_qp *qp); 1107 int qib_make_rc_req(struct qib_qp *qp); 1109 int qib_make_uc_req(struct qib_qp *qp); 1111 int qib_make_ud_req(struct qib_qp *qp);
|
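get_swqe_ptr() above cannot use ordinary array indexing: each send WQE is a fixed header followed by qp->s_max_sge scatter/gather entries, so the element stride is decided per QP at creation time. A sketch of that variable-stride indexing with illustrative stub types:

#include <stddef.h>

struct sge_stub {
        void *vaddr;
        size_t length;
};

struct swqe_stub {
        unsigned long wr_id;
        /* ... fixed part ... */
        struct sge_stub sg_list[];      /* max_sge entries follow */
};

static struct swqe_stub *get_wqe(void *wq, unsigned int n,
                                 unsigned int max_sge)
{
        size_t stride = sizeof(struct swqe_stub) +
                        max_sge * sizeof(struct sge_stub);

        return (struct swqe_stub *)((char *)wq + (size_t)n * stride);
}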
H A D | qib_keys.c | 241 * @qp: qp for validation 252 int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, qib_rkey_ok() argument 255 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; qib_rkey_ok() 266 struct qib_pd *pd = to_ipd(qp->ibqp.pd); qib_rkey_ok() 289 if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) qib_rkey_ok() 341 int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr) qib_fast_reg_mr() argument 343 struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; qib_fast_reg_mr() 344 struct qib_pd *pd = to_ipd(qp->ibqp.pd); qib_fast_reg_mr() 360 if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) qib_fast_reg_mr()
|
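At its core, qib_rkey_ok() above must check that the key matches, that the PD matches, and that the requested [vaddr, vaddr + len) window lies entirely inside the registered region. A sketch of an overflow-safe form of the range test (struct fields are illustrative; the real MR also carries access flags and a page list that the driver walks to build the SGE):

#include <stdbool.h>
#include <stdint.h>

struct mr_stub {
        uint64_t iova;          /* start of the registered range */
        uint64_t length;        /* bytes registered */
        uint32_t rkey;
};

static bool rkey_range_ok(const struct mr_stub *mr, uint32_t rkey,
                          uint64_t vaddr, uint64_t len)
{
        if (mr->rkey != rkey)
                return false;
        /* vaddr >= iova and vaddr + len <= iova + length, written so
         * that neither addition can silently wrap past UINT64_MAX. */
        return vaddr >= mr->iova &&
               len <= mr->length &&
               vaddr - mr->iova <= mr->length - len;
}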
H A D | qib_sdma.c | 516 atomic_inc(&tx->qp->s_dma_busy); complete_sdma_err_req() 539 struct qib_qp *qp; qib_sdma_verbs_send() local 648 atomic_inc(&tx->qp->s_dma_busy); qib_sdma_verbs_send() 665 qp = tx->qp; qib_sdma_verbs_send() 667 spin_lock(&qp->r_lock); qib_sdma_verbs_send() 668 spin_lock(&qp->s_lock); qib_sdma_verbs_send() 669 if (qp->ibqp.qp_type == IB_QPT_RC) { qib_sdma_verbs_send() 671 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) qib_sdma_verbs_send() 672 qib_error_qp(qp, IB_WC_GENERAL_ERR); qib_sdma_verbs_send() 673 } else if (qp->s_wqe) qib_sdma_verbs_send() 674 qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); qib_sdma_verbs_send() 675 spin_unlock(&qp->s_lock); qib_sdma_verbs_send() 676 spin_unlock(&qp->r_lock); qib_sdma_verbs_send() 681 qp = tx->qp; qib_sdma_verbs_send() 682 spin_lock(&qp->s_lock); qib_sdma_verbs_send() 683 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { qib_sdma_verbs_send() 693 qp->s_tx = tx; qib_sdma_verbs_send() 696 if (list_empty(&qp->iowait)) { qib_sdma_verbs_send() 701 qp->s_flags |= QIB_S_WAIT_DMA_DESC; qib_sdma_verbs_send() 702 list_add_tail(&qp->iowait, &dev->dmawait); qib_sdma_verbs_send() 705 qp->s_flags &= ~QIB_S_BUSY; qib_sdma_verbs_send() 706 spin_unlock(&qp->s_lock); qib_sdma_verbs_send() 709 spin_unlock(&qp->s_lock); qib_sdma_verbs_send()
|
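When qib_sdma_verbs_send() above runs out of descriptors, it parks the half-built request in qp->s_tx, sets QIB_S_WAIT_DMA_DESC, clears QIB_S_BUSY, and queues the QP on dev->dmawait; qib_verbs_sdma_desc_avail() later clears the flag and reschedules the send. A much-simplified sketch of that back-pressure handshake (flag values and struct names here are illustrative, and the wait list is omitted):

#include <stdbool.h>

#define S_BUSY           (1u << 0)
#define S_WAIT_DMA_DESC  (1u << 1)

struct sender {
        unsigned int s_flags;
        void *s_tx;             /* parked, partially built request */
};

/* Ring too short: remember where we stopped and wait for space. */
static void wait_for_descs(struct sender *s, void *tx)
{
        s->s_tx = tx;
        s->s_flags |= S_WAIT_DMA_DESC;
        s->s_flags &= ~S_BUSY;  /* allow the QP to be rescheduled */
}

/* Descriptors freed: returns true if the caller should reschedule. */
static bool descs_available(struct sender *s)
{
        if (s->s_flags & S_WAIT_DMA_DESC) {
                s->s_flags &= ~S_WAIT_DMA_DESC;
                return true;
        }
        return false;
}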
H A D | qib_cq.c | 49 * This may be called with qp->s_lock held. 91 wc->uqueue[head].qp_num = entry->qp->qp_num; qib_cq_enter()
|
H A D | qib_mad.h | 306 __be32 qp; member in struct:ib_cc_trap_key_violation_attr
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
H A D | ipath_rc.c | 56 * ipath_init_restart- initialize the qp->s_sge after a restart 57 * @qp: the QP who's SGE we're restarting 62 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) ipath_init_restart() argument 66 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, ipath_init_restart() 67 ib_mtu_enum_to_int(qp->path_mtu)); ipath_init_restart() 68 dev = to_idev(qp->ibqp.device); ipath_init_restart() 70 if (list_empty(&qp->timerwait)) ipath_init_restart() 71 list_add_tail(&qp->timerwait, ipath_init_restart() 78 * @qp: a pointer to the QP 86 static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp, ipath_make_rc_ack() argument 96 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) ipath_make_rc_ack() 102 switch (qp->s_ack_state) { ipath_make_rc_ack() 111 if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC) ipath_make_rc_ack() 112 qp->s_tail_ack_queue = 0; ipath_make_rc_ack() 117 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { OP() 118 if (qp->s_flags & IPATH_S_ACK_PENDING) OP() 120 qp->s_ack_state = OP(ACKNOWLEDGE); OP() 124 e = &qp->s_ack_queue[qp->s_tail_ack_queue]; 127 qp->s_ack_rdma_sge = e->rdma_sge; 128 qp->s_cur_sge = &qp->s_ack_rdma_sge; 132 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); 134 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); 137 ohdr->u.aeth = ipath_compute_aeth(qp); 139 qp->s_ack_rdma_psn = e->psn; 140 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK; 143 qp->s_cur_sge = NULL; 145 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); 146 ohdr->u.at.aeth = ipath_compute_aeth(qp); 155 bth0 = qp->s_ack_state << 24; 159 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); 162 len = qp->s_ack_rdma_sge.sge.sge_length; 166 ohdr->u.aeth = ipath_compute_aeth(qp); 168 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); 169 qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1; 171 bth0 = qp->s_ack_state << 24; 172 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK; 183 qp->s_ack_state = OP(SEND_ONLY); 184 qp->s_flags &= ~IPATH_S_ACK_PENDING; 185 qp->s_cur_sge = NULL; 186 if (qp->s_nak_state) 188 cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | 189 (qp->s_nak_state << 192 ohdr->u.aeth = ipath_compute_aeth(qp); 196 bth2 = qp->s_ack_psn & IPATH_PSN_MASK; 198 qp->s_hdrwords = hwords; 199 qp->s_cur_size = len; 200 ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2); 209 * @qp: a pointer to the QP 213 int ipath_make_rc_req(struct ipath_qp *qp) ipath_make_rc_req() argument 215 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_make_rc_req() 223 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); ipath_make_rc_req() 228 ohdr = &qp->s_hdr.u.oth; ipath_make_rc_req() 229 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) ipath_make_rc_req() 230 ohdr = &qp->s_hdr.u.l.oth; ipath_make_rc_req() 236 spin_lock_irqsave(&qp->s_lock, flags); ipath_make_rc_req() 239 if ((qp->r_head_ack_queue != qp->s_tail_ack_queue || ipath_make_rc_req() 240 (qp->s_flags & IPATH_S_ACK_PENDING) || ipath_make_rc_req() 241 qp->s_ack_state != OP(ACKNOWLEDGE)) && ipath_make_rc_req() 242 ipath_make_rc_ack(dev, qp, ohdr, pmtu)) ipath_make_rc_req() 245 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) { ipath_make_rc_req() 246 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) ipath_make_rc_req() 249 if (qp->s_last == qp->s_head) ipath_make_rc_req() 252 if (atomic_read(&qp->s_dma_busy)) { ipath_make_rc_req() 253 qp->s_flags |= IPATH_S_WAIT_DMA; ipath_make_rc_req() 256 wqe = get_swqe_ptr(qp, qp->s_last); ipath_make_rc_req() 257 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); ipath_make_rc_req() 262 
if (qp->s_rnr_timeout) { ipath_make_rc_req() 263 qp->s_flags |= IPATH_S_WAITING; ipath_make_rc_req() 272 wqe = get_swqe_ptr(qp, qp->s_cur); ipath_make_rc_req() 273 switch (qp->s_state) { ipath_make_rc_req() 275 if (!(ib_ipath_state_ops[qp->state] & ipath_make_rc_req() 286 if (qp->s_cur == qp->s_tail) { ipath_make_rc_req() 288 if (qp->s_tail == qp->s_head) ipath_make_rc_req() 295 qp->s_num_rd_atomic) { ipath_make_rc_req() 296 qp->s_flags |= IPATH_S_FENCE_PENDING; ipath_make_rc_req() 299 wqe->psn = qp->s_next_psn; ipath_make_rc_req() 308 ss = &qp->s_sge; ipath_make_rc_req() 314 if (qp->s_lsn != (u32) -1 && ipath_make_rc_req() 315 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { ipath_make_rc_req() 316 qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT; ipath_make_rc_req() 322 qp->s_state = OP(SEND_FIRST); ipath_make_rc_req() 327 qp->s_state = OP(SEND_ONLY); ipath_make_rc_req() 329 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); ipath_make_rc_req() 337 if (++qp->s_cur == qp->s_size) ipath_make_rc_req() 338 qp->s_cur = 0; ipath_make_rc_req() 342 if (newreq && qp->s_lsn != (u32) -1) ipath_make_rc_req() 343 qp->s_lsn++; ipath_make_rc_req() 347 if (qp->s_lsn != (u32) -1 && ipath_make_rc_req() 348 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { ipath_make_rc_req() 349 qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT; ipath_make_rc_req() 361 qp->s_state = OP(RDMA_WRITE_FIRST); ipath_make_rc_req() 366 qp->s_state = OP(RDMA_WRITE_ONLY); ipath_make_rc_req() 368 qp->s_state = ipath_make_rc_req() 377 if (++qp->s_cur == qp->s_size) ipath_make_rc_req() 378 qp->s_cur = 0; ipath_make_rc_req() 387 if (qp->s_num_rd_atomic >= ipath_make_rc_req() 388 qp->s_max_rd_atomic) { ipath_make_rc_req() 389 qp->s_flags |= IPATH_S_RDMAR_PENDING; ipath_make_rc_req() 392 qp->s_num_rd_atomic++; ipath_make_rc_req() 393 if (qp->s_lsn != (u32) -1) ipath_make_rc_req() 394 qp->s_lsn++; ipath_make_rc_req() 400 qp->s_next_psn += (len - 1) / pmtu; ipath_make_rc_req() 401 wqe->lpsn = qp->s_next_psn++; ipath_make_rc_req() 408 qp->s_state = OP(RDMA_READ_REQUEST); ipath_make_rc_req() 412 if (++qp->s_cur == qp->s_size) ipath_make_rc_req() 413 qp->s_cur = 0; ipath_make_rc_req() 423 if (qp->s_num_rd_atomic >= ipath_make_rc_req() 424 qp->s_max_rd_atomic) { ipath_make_rc_req() 425 qp->s_flags |= IPATH_S_RDMAR_PENDING; ipath_make_rc_req() 428 qp->s_num_rd_atomic++; ipath_make_rc_req() 429 if (qp->s_lsn != (u32) -1) ipath_make_rc_req() 430 qp->s_lsn++; ipath_make_rc_req() 434 qp->s_state = OP(COMPARE_SWAP); ipath_make_rc_req() 440 qp->s_state = OP(FETCH_ADD); ipath_make_rc_req() 454 if (++qp->s_cur == qp->s_size) ipath_make_rc_req() 455 qp->s_cur = 0; ipath_make_rc_req() 461 qp->s_sge.sge = wqe->sg_list[0]; ipath_make_rc_req() 462 qp->s_sge.sg_list = wqe->sg_list + 1; ipath_make_rc_req() 463 qp->s_sge.num_sge = wqe->wr.num_sge; ipath_make_rc_req() 464 qp->s_len = wqe->length; ipath_make_rc_req() 466 qp->s_tail++; ipath_make_rc_req() 467 if (qp->s_tail >= qp->s_size) ipath_make_rc_req() 468 qp->s_tail = 0; ipath_make_rc_req() 470 bth2 |= qp->s_psn & IPATH_PSN_MASK; ipath_make_rc_req() 472 qp->s_psn = wqe->lpsn + 1; ipath_make_rc_req() 474 qp->s_psn++; ipath_make_rc_req() 475 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) ipath_make_rc_req() 476 qp->s_next_psn = qp->s_psn; ipath_make_rc_req() 484 if (list_empty(&qp->timerwait)) ipath_make_rc_req() 485 list_add_tail(&qp->timerwait, ipath_make_rc_req() 495 ipath_init_restart(qp, wqe); ipath_make_rc_req() 498 qp->s_state = OP(SEND_MIDDLE); ipath_make_rc_req() 501 bth2 = qp->s_psn++ & IPATH_PSN_MASK; ipath_make_rc_req() 502 if 
(ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) ipath_make_rc_req() 503 qp->s_next_psn = qp->s_psn; ipath_make_rc_req() 504 ss = &qp->s_sge; ipath_make_rc_req() 505 len = qp->s_len; ipath_make_rc_req() 511 qp->s_state = OP(SEND_LAST); ipath_make_rc_req() 513 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); ipath_make_rc_req() 521 qp->s_cur++; ipath_make_rc_req() 522 if (qp->s_cur >= qp->s_size) ipath_make_rc_req() 523 qp->s_cur = 0; ipath_make_rc_req() 531 ipath_init_restart(qp, wqe); ipath_make_rc_req() 534 qp->s_state = OP(RDMA_WRITE_MIDDLE); ipath_make_rc_req() 537 bth2 = qp->s_psn++ & IPATH_PSN_MASK; ipath_make_rc_req() 538 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) ipath_make_rc_req() 539 qp->s_next_psn = qp->s_psn; ipath_make_rc_req() 540 ss = &qp->s_sge; ipath_make_rc_req() 541 len = qp->s_len; ipath_make_rc_req() 547 qp->s_state = OP(RDMA_WRITE_LAST); ipath_make_rc_req() 549 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); ipath_make_rc_req() 557 qp->s_cur++; ipath_make_rc_req() 558 if (qp->s_cur >= qp->s_size) ipath_make_rc_req() 559 qp->s_cur = 0; ipath_make_rc_req() 567 ipath_init_restart(qp, wqe); ipath_make_rc_req() 568 len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu; ipath_make_rc_req() 573 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); ipath_make_rc_req() 574 qp->s_state = OP(RDMA_READ_REQUEST); ipath_make_rc_req() 576 bth2 = qp->s_psn & IPATH_PSN_MASK; ipath_make_rc_req() 577 qp->s_psn = wqe->lpsn + 1; ipath_make_rc_req() 580 qp->s_cur++; ipath_make_rc_req() 581 if (qp->s_cur == qp->s_size) ipath_make_rc_req() 582 qp->s_cur = 0; ipath_make_rc_req() 585 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0) ipath_make_rc_req() 587 qp->s_len -= len; ipath_make_rc_req() 588 qp->s_hdrwords = hwords; ipath_make_rc_req() 589 qp->s_cur_sge = ss; ipath_make_rc_req() 590 qp->s_cur_size = len; ipath_make_rc_req() 591 ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2); ipath_make_rc_req() 597 qp->s_flags &= ~IPATH_S_BUSY; ipath_make_rc_req() 599 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_make_rc_req() 605 * @qp: a pointer to the QP 612 static void send_rc_ack(struct ipath_qp *qp) send_rc_ack() argument 614 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); send_rc_ack() 624 spin_lock_irqsave(&qp->s_lock, flags); send_rc_ack() 627 if (qp->r_head_ack_queue != qp->s_tail_ack_queue || send_rc_ack() 628 (qp->s_flags & IPATH_S_ACK_PENDING) || send_rc_ack() 629 qp->s_ack_state != OP(ACKNOWLEDGE)) send_rc_ack() 632 spin_unlock_irqrestore(&qp->s_lock, flags); send_rc_ack() 648 spin_lock_irqsave(&qp->s_lock, flags); send_rc_ack() 657 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { send_rc_ack() 659 &qp->remote_ah_attr.grh, send_rc_ack() 665 bth0 = ipath_get_pkey(dd, qp->s_pkey_index) | send_rc_ack() 667 if (qp->r_nak_state) send_rc_ack() 668 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | send_rc_ack() 669 (qp->r_nak_state << send_rc_ack() 672 ohdr->u.aeth = ipath_compute_aeth(qp); send_rc_ack() 673 lrh0 |= qp->remote_ah_attr.sl << 4; send_rc_ack() 675 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); send_rc_ack() 678 qp->remote_ah_attr.src_path_bits); send_rc_ack() 680 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); send_rc_ack() 681 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); send_rc_ack() 701 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK) { send_rc_ack() 703 qp->s_flags |= IPATH_S_ACK_PENDING; send_rc_ack() 704 qp->s_nak_state = qp->r_nak_state; send_rc_ack() 705 qp->s_ack_psn = qp->r_ack_psn; 
send_rc_ack() 708 ipath_schedule_send(qp); send_rc_ack() 710 spin_unlock_irqrestore(&qp->s_lock, flags); send_rc_ack() 717 * @qp: the QP 724 static void reset_psn(struct ipath_qp *qp, u32 psn) reset_psn() argument 726 u32 n = qp->s_last; reset_psn() 727 struct ipath_swqe *wqe = get_swqe_ptr(qp, n); reset_psn() 730 qp->s_cur = n; reset_psn() 737 qp->s_state = OP(SEND_LAST); reset_psn() 746 if (++n == qp->s_size) reset_psn() 748 if (n == qp->s_tail) reset_psn() 750 wqe = get_swqe_ptr(qp, n); reset_psn() 754 qp->s_cur = n; reset_psn() 760 qp->s_state = OP(SEND_LAST); reset_psn() 774 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST); reset_psn() 779 qp->s_state = OP(RDMA_READ_RESPONSE_LAST); reset_psn() 783 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE); reset_psn() 791 qp->s_state = OP(SEND_LAST); reset_psn() 794 qp->s_psn = psn; reset_psn() 799 * @qp: the QP to restart 805 void ipath_restart_rc(struct ipath_qp *qp, u32 psn) ipath_restart_rc() argument 807 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); ipath_restart_rc() 810 if (qp->s_retry == 0) { ipath_restart_rc() 811 ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); ipath_restart_rc() 812 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); ipath_restart_rc() 815 qp->s_retry--; ipath_restart_rc() 821 dev = to_idev(qp->ibqp.device); ipath_restart_rc() 823 if (!list_empty(&qp->timerwait)) ipath_restart_rc() 824 list_del_init(&qp->timerwait); ipath_restart_rc() 825 if (!list_empty(&qp->piowait)) ipath_restart_rc() 826 list_del_init(&qp->piowait); ipath_restart_rc() 832 dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK; ipath_restart_rc() 834 reset_psn(qp, psn); ipath_restart_rc() 835 ipath_schedule_send(qp); ipath_restart_rc() 841 static inline void update_last_psn(struct ipath_qp *qp, u32 psn) update_last_psn() argument 843 qp->s_last_psn = psn; update_last_psn() 848 * @qp: the QP the ACK came in on 857 static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode, do_rc_ack() argument 860 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); do_rc_ack() 875 if (!list_empty(&qp->timerwait)) do_rc_ack() 876 list_del_init(&qp->timerwait); do_rc_ack() 888 wqe = get_swqe_ptr(qp, qp->s_last); do_rc_ack() 925 update_last_psn(qp, wqe->psn - 1); do_rc_ack() 927 ipath_restart_rc(qp, wqe->psn); do_rc_ack() 937 if (qp->s_num_rd_atomic && do_rc_ack() 941 qp->s_num_rd_atomic--; do_rc_ack() 943 if (((qp->s_flags & IPATH_S_FENCE_PENDING) && do_rc_ack() 944 !qp->s_num_rd_atomic) || do_rc_ack() 945 qp->s_flags & IPATH_S_RDMAR_PENDING) do_rc_ack() 946 ipath_schedule_send(qp); do_rc_ack() 949 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) || do_rc_ack() 956 wc.qp = &qp->ibqp; do_rc_ack() 957 wc.src_qp = qp->remote_qpn; do_rc_ack() 958 wc.slid = qp->remote_ah_attr.dlid; do_rc_ack() 959 wc.sl = qp->remote_ah_attr.sl; do_rc_ack() 960 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); do_rc_ack() 962 qp->s_retry = qp->s_retry_cnt; do_rc_ack() 968 if (qp->s_last == qp->s_cur) { do_rc_ack() 969 if (++qp->s_cur >= qp->s_size) do_rc_ack() 970 qp->s_cur = 0; do_rc_ack() 971 qp->s_last = qp->s_cur; do_rc_ack() 972 if (qp->s_last == qp->s_tail) do_rc_ack() 974 wqe = get_swqe_ptr(qp, qp->s_cur); do_rc_ack() 975 qp->s_state = OP(SEND_LAST); do_rc_ack() 976 qp->s_psn = wqe->psn; do_rc_ack() 978 if (++qp->s_last >= qp->s_size) do_rc_ack() 979 qp->s_last = 0; do_rc_ack() 980 if (qp->state == IB_QPS_SQD && qp->s_last == qp->s_cur) do_rc_ack() 981 qp->s_draining = 0; do_rc_ack() 982 if (qp->s_last == qp->s_tail) do_rc_ack() 984 wqe = get_swqe_ptr(qp, qp->s_last); do_rc_ack() 992 if 
(qp->s_last != qp->s_tail) { do_rc_ack() 994 if (list_empty(&qp->timerwait)) do_rc_ack() 995 list_add_tail(&qp->timerwait, do_rc_ack() 1003 if (ipath_cmp24(qp->s_psn, psn) <= 0) { do_rc_ack() 1004 reset_psn(qp, psn + 1); do_rc_ack() 1005 ipath_schedule_send(qp); do_rc_ack() 1007 } else if (ipath_cmp24(qp->s_psn, psn) <= 0) { do_rc_ack() 1008 qp->s_state = OP(SEND_LAST); do_rc_ack() 1009 qp->s_psn = psn + 1; do_rc_ack() 1011 ipath_get_credit(qp, aeth); do_rc_ack() 1012 qp->s_rnr_retry = qp->s_rnr_retry_cnt; do_rc_ack() 1013 qp->s_retry = qp->s_retry_cnt; do_rc_ack() 1014 update_last_psn(qp, psn); do_rc_ack() 1020 if (qp->s_last == qp->s_tail) do_rc_ack() 1022 if (qp->s_rnr_retry == 0) { do_rc_ack() 1026 if (qp->s_rnr_retry_cnt < 7) do_rc_ack() 1027 qp->s_rnr_retry--; do_rc_ack() 1030 update_last_psn(qp, psn - 1); do_rc_ack() 1036 (qp->s_psn - psn) & IPATH_PSN_MASK; do_rc_ack() 1038 reset_psn(qp, psn); do_rc_ack() 1040 qp->s_rnr_timeout = do_rc_ack() 1043 ipath_insert_rnr_queue(qp); do_rc_ack() 1044 ipath_schedule_send(qp); do_rc_ack() 1048 if (qp->s_last == qp->s_tail) do_rc_ack() 1051 update_last_psn(qp, psn - 1); do_rc_ack() 1062 ipath_restart_rc(qp, psn); do_rc_ack() 1079 ipath_send_complete(qp, wqe, status); do_rc_ack() 1080 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); do_rc_ack() 1087 qp->s_rnr_retry = qp->s_rnr_retry_cnt; do_rc_ack() 1106 * @qp: the QP for this packet 1120 struct ipath_qp *qp, ipath_rc_rcv_resp() 1133 spin_lock_irqsave(&qp->s_lock, flags); ipath_rc_rcv_resp() 1136 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) ipath_rc_rcv_resp() 1140 if (ipath_cmp24(psn, qp->s_next_psn) >= 0) ipath_rc_rcv_resp() 1144 diff = ipath_cmp24(psn, qp->s_last_psn); ipath_rc_rcv_resp() 1155 ipath_get_credit(qp, aeth); ipath_rc_rcv_resp() 1160 if (unlikely(qp->s_last == qp->s_tail)) ipath_rc_rcv_resp() 1162 wqe = get_swqe_ptr(qp, qp->s_last); ipath_rc_rcv_resp() 1185 if (!do_rc_ack(qp, aeth, psn, opcode, val) || ipath_rc_rcv_resp() 1189 wqe = get_swqe_ptr(qp, qp->s_last); ipath_rc_rcv_resp() 1192 qp->r_flags &= ~IPATH_R_RDMAR_SEQ; ipath_rc_rcv_resp() 1198 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, ipath_rc_rcv_resp() 1204 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { OP() 1206 if (qp->r_flags & IPATH_R_RDMAR_SEQ) OP() 1208 qp->r_flags |= IPATH_R_RDMAR_SEQ; OP() 1209 ipath_restart_rc(qp, qp->s_last_psn + 1); OP() 1217 if (unlikely(pmtu >= qp->s_rdma_read_len)) 1222 if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait)) 1223 list_move_tail(&qp->timerwait, 1228 qp->s_retry = qp->s_retry_cnt; 1234 qp->s_rdma_read_len -= pmtu; 1235 update_last_psn(qp, psn); 1236 spin_unlock_irqrestore(&qp->s_lock, flags); 1237 ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu); 1245 if (!do_rc_ack(qp, aeth, psn, opcode, 0)) 1261 wqe = get_swqe_ptr(qp, qp->s_last); 1262 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, 1268 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { OP() 1270 if (qp->r_flags & IPATH_R_RDMAR_SEQ) OP() 1272 qp->r_flags |= IPATH_R_RDMAR_SEQ; OP() 1273 ipath_restart_rc(qp, qp->s_last_psn + 1); OP() 1289 if (unlikely(tlen != qp->s_rdma_read_len)) 1297 ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen); 1298 (void) do_rc_ack(qp, aeth, psn, 1310 ipath_send_complete(qp, wqe, status); 1311 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); 1313 spin_unlock_irqrestore(&qp->s_lock, flags); 1323 * @qp: the QP for this packet 1338 struct ipath_qp *qp, ipath_rc_rcv_error() 1355 if (!qp->r_nak_state) { ipath_rc_rcv_error() 1356 qp->r_nak_state = IB_NAK_PSN_ERROR; 
ipath_rc_rcv_error() 1358 qp->r_ack_psn = qp->r_psn; ipath_rc_rcv_error() 1382 spin_lock_irqsave(&qp->s_lock, flags); ipath_rc_rcv_error() 1384 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) ipath_rc_rcv_error() 1387 for (i = qp->r_head_ack_queue; ; i = prev) { ipath_rc_rcv_error() 1388 if (i == qp->s_tail_ack_queue) ipath_rc_rcv_error() 1394 if (prev == qp->r_head_ack_queue) { ipath_rc_rcv_error() 1398 e = &qp->s_ack_queue[prev]; ipath_rc_rcv_error() 1404 if (prev == qp->s_tail_ack_queue) ipath_rc_rcv_error() 1437 ib_mtu_enum_to_int(qp->path_mtu); OP() 1446 ok = ipath_rkey_ok(qp, &e->rdma_sge, OP() 1460 qp->s_ack_state = OP(ACKNOWLEDGE); OP() 1461 qp->s_tail_ack_queue = prev; OP() 1474 qp->s_ack_state = OP(ACKNOWLEDGE); OP() 1475 qp->s_tail_ack_queue = prev; OP() 1486 if (i == qp->r_head_ack_queue) { 1487 spin_unlock_irqrestore(&qp->s_lock, flags); 1488 qp->r_nak_state = 0; 1489 qp->r_ack_psn = qp->r_psn - 1; 1497 if (qp->r_head_ack_queue == qp->s_tail_ack_queue && 1498 !(qp->s_flags & IPATH_S_ACK_PENDING) && 1499 qp->s_ack_state == OP(ACKNOWLEDGE)) { 1500 spin_unlock_irqrestore(&qp->s_lock, flags); 1501 qp->r_nak_state = 0; 1502 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1; 1509 qp->s_ack_state = OP(ACKNOWLEDGE); 1510 qp->s_tail_ack_queue = i; 1513 qp->r_nak_state = 0; 1514 ipath_schedule_send(qp); 1517 spin_unlock_irqrestore(&qp->s_lock, flags); 1525 void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) ipath_rc_error() argument 1530 spin_lock_irqsave(&qp->s_lock, flags); ipath_rc_error() 1531 lastwqe = ipath_error_qp(qp, err); ipath_rc_error() 1532 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_rc_error() 1537 ev.device = qp->ibqp.device; ipath_rc_error() 1538 ev.element.qp = &qp->ibqp; ipath_rc_error() 1540 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); ipath_rc_error() 1544 static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n) ipath_update_ack_queue() argument 1551 if (n == qp->s_tail_ack_queue) { ipath_update_ack_queue() 1552 qp->s_tail_ack_queue = next; ipath_update_ack_queue() 1553 qp->s_ack_state = OP(ACKNOWLEDGE); ipath_update_ack_queue() 1564 * @qp: the QP for this packet 1571 int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_rc_rcv() 1579 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); ipath_rc_rcv() 1586 if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid)) ipath_rc_rcv() 1620 ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn, ipath_rc_rcv() 1626 diff = ipath_cmp24(psn, qp->r_psn); ipath_rc_rcv() 1628 if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode, ipath_rc_rcv() 1635 switch (qp->r_state) { ipath_rc_rcv() 1673 if (!ipath_get_rwqe(qp, 0)) ipath_rc_rcv() 1675 qp->r_rcv_len = 0; ipath_rc_rcv() 1683 qp->r_rcv_len += pmtu; ipath_rc_rcv() 1684 if (unlikely(qp->r_rcv_len > qp->r_len)) ipath_rc_rcv() 1686 ipath_copy_sge(&qp->r_sge, data, pmtu); ipath_rc_rcv() 1691 if (!ipath_get_rwqe(qp, 1)) ipath_rc_rcv() 1697 if (!ipath_get_rwqe(qp, 0)) ipath_rc_rcv() 1699 qp->r_rcv_len = 0; ipath_rc_rcv() 1726 wc.byte_len = tlen + qp->r_rcv_len; 1727 if (unlikely(wc.byte_len > qp->r_len)) 1729 ipath_copy_sge(&qp->r_sge, data, tlen); 1730 qp->r_msn++; 1731 if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) 1733 wc.wr_id = qp->r_wr_id; 1740 wc.qp = &qp->ibqp; 1741 wc.src_qp = qp->remote_qpn; 1742 wc.slid = qp->remote_ah_attr.dlid; 1743 wc.sl = qp->remote_ah_attr.sl; 1745 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1753 if (unlikely(!(qp->qp_access_flags & 1765 qp->r_len = be32_to_cpu(reth->length); 1766 
qp->r_rcv_len = 0; 1767 if (qp->r_len != 0) { 1773 ok = ipath_rkey_ok(qp, &qp->r_sge, 1774 qp->r_len, vaddr, rkey, 1779 qp->r_sge.sg_list = NULL; 1780 qp->r_sge.sge.mr = NULL; 1781 qp->r_sge.sge.vaddr = NULL; 1782 qp->r_sge.sge.length = 0; 1783 qp->r_sge.sge.sge_length = 0; 1789 if (!ipath_get_rwqe(qp, 1)) 1798 if (unlikely(!(qp->qp_access_flags & OP() 1801 next = qp->r_head_ack_queue + 1; OP() 1804 spin_lock_irqsave(&qp->s_lock, flags); OP() 1806 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) OP() 1808 if (unlikely(next == qp->s_tail_ack_queue)) { OP() 1809 if (!qp->s_ack_queue[next].sent) OP() 1811 ipath_update_ack_queue(qp, next); OP() 1813 e = &qp->s_ack_queue[qp->r_head_ack_queue]; OP() 1828 ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr, OP() 1837 qp->r_psn += (len - 1) / pmtu; OP() 1854 qp->r_msn++; OP() 1855 qp->r_psn++; OP() 1856 qp->r_state = opcode; OP() 1857 qp->r_nak_state = 0; OP() 1858 qp->r_head_ack_queue = next; OP() 1861 ipath_schedule_send(qp); OP() 1876 if (unlikely(!(qp->qp_access_flags & OP() 1879 next = qp->r_head_ack_queue + 1; OP() 1882 spin_lock_irqsave(&qp->s_lock, flags); OP() 1884 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) OP() 1886 if (unlikely(next == qp->s_tail_ack_queue)) { OP() 1887 if (!qp->s_ack_queue[next].sent) OP() 1889 ipath_update_ack_queue(qp, next); OP() 1901 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, OP() 1906 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; OP() 1908 e = &qp->s_ack_queue[qp->r_head_ack_queue]; OP() 1911 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, OP() 1917 qp->r_msn++; OP() 1918 qp->r_psn++; OP() 1919 qp->r_state = opcode; OP() 1920 qp->r_nak_state = 0; OP() 1921 qp->r_head_ack_queue = next; OP() 1924 ipath_schedule_send(qp); OP() 1933 qp->r_psn++; 1934 qp->r_state = opcode; 1935 qp->r_ack_psn = psn; 1936 qp->r_nak_state = 0; 1943 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; 1944 qp->r_ack_psn = qp->r_psn; 1948 spin_unlock_irqrestore(&qp->s_lock, flags); 1950 ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR); 1951 qp->r_nak_state = IB_NAK_INVALID_REQUEST; 1952 qp->r_ack_psn = qp->r_psn; 1956 spin_unlock_irqrestore(&qp->s_lock, flags); 1958 ipath_rc_error(qp, IB_WC_LOC_PROT_ERR); 1959 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; 1960 qp->r_ack_psn = qp->r_psn; 1962 send_rc_ack(qp); 1966 spin_unlock_irqrestore(&qp->s_lock, flags); 1117 ipath_rc_rcv_resp(struct ipath_ibdev *dev, struct ipath_other_headers *ohdr, void *data, u32 tlen, struct ipath_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, int header_in_data) ipath_rc_rcv_resp() argument 1335 ipath_rc_rcv_error(struct ipath_ibdev *dev, struct ipath_other_headers *ohdr, void *data, struct ipath_qp *qp, u32 opcode, u32 psn, int diff, int header_in_data) ipath_rc_rcv_error() argument 1570 ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_rc_rcv() argument
H A D | ipath_uc.c | 42 * @qp: a pointer to the QP 46 int ipath_make_uc_req(struct ipath_qp *qp) ipath_make_uc_req() argument 54 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); ipath_make_uc_req() 57 spin_lock_irqsave(&qp->s_lock, flags); ipath_make_uc_req() 59 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) { ipath_make_uc_req() 60 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) ipath_make_uc_req() 63 if (qp->s_last == qp->s_head) ipath_make_uc_req() 66 if (atomic_read(&qp->s_dma_busy)) { ipath_make_uc_req() 67 qp->s_flags |= IPATH_S_WAIT_DMA; ipath_make_uc_req() 70 wqe = get_swqe_ptr(qp, qp->s_last); ipath_make_uc_req() 71 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); ipath_make_uc_req() 75 ohdr = &qp->s_hdr.u.oth; ipath_make_uc_req() 76 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) ipath_make_uc_req() 77 ohdr = &qp->s_hdr.u.l.oth; ipath_make_uc_req() 84 wqe = get_swqe_ptr(qp, qp->s_cur); ipath_make_uc_req() 85 qp->s_wqe = NULL; ipath_make_uc_req() 86 switch (qp->s_state) { ipath_make_uc_req() 88 if (!(ib_ipath_state_ops[qp->state] & ipath_make_uc_req() 92 if (qp->s_cur == qp->s_head) ipath_make_uc_req() 97 qp->s_psn = wqe->psn = qp->s_next_psn; ipath_make_uc_req() 98 qp->s_sge.sge = wqe->sg_list[0]; ipath_make_uc_req() 99 qp->s_sge.sg_list = wqe->sg_list + 1; ipath_make_uc_req() 100 qp->s_sge.num_sge = wqe->wr.num_sge; ipath_make_uc_req() 101 qp->s_len = len = wqe->length; ipath_make_uc_req() 106 qp->s_state = OP(SEND_FIRST); ipath_make_uc_req() 111 qp->s_state = OP(SEND_ONLY); ipath_make_uc_req() 113 qp->s_state = ipath_make_uc_req() 121 qp->s_wqe = wqe; ipath_make_uc_req() 122 if (++qp->s_cur >= qp->s_size) ipath_make_uc_req() 123 qp->s_cur = 0; ipath_make_uc_req() 135 qp->s_state = OP(RDMA_WRITE_FIRST); ipath_make_uc_req() 140 qp->s_state = OP(RDMA_WRITE_ONLY); ipath_make_uc_req() 142 qp->s_state = ipath_make_uc_req() 150 qp->s_wqe = wqe; ipath_make_uc_req() 151 if (++qp->s_cur >= qp->s_size) ipath_make_uc_req() 152 qp->s_cur = 0; ipath_make_uc_req() 161 qp->s_state = OP(SEND_MIDDLE); ipath_make_uc_req() 164 len = qp->s_len; ipath_make_uc_req() 170 qp->s_state = OP(SEND_LAST); ipath_make_uc_req() 172 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); ipath_make_uc_req() 179 qp->s_wqe = wqe; ipath_make_uc_req() 180 if (++qp->s_cur >= qp->s_size) ipath_make_uc_req() 181 qp->s_cur = 0; ipath_make_uc_req() 185 qp->s_state = OP(RDMA_WRITE_MIDDLE); ipath_make_uc_req() 188 len = qp->s_len; ipath_make_uc_req() 194 qp->s_state = OP(RDMA_WRITE_LAST); ipath_make_uc_req() 196 qp->s_state = ipath_make_uc_req() 204 qp->s_wqe = wqe; ipath_make_uc_req() 205 if (++qp->s_cur >= qp->s_size) ipath_make_uc_req() 206 qp->s_cur = 0; ipath_make_uc_req() 209 qp->s_len -= len; ipath_make_uc_req() 210 qp->s_hdrwords = hwords; ipath_make_uc_req() 211 qp->s_cur_sge = &qp->s_sge; ipath_make_uc_req() 212 qp->s_cur_size = len; ipath_make_uc_req() 213 ipath_make_ruc_header(to_idev(qp->ibqp.device), ipath_make_uc_req() 214 qp, ohdr, bth0 | (qp->s_state << 24), ipath_make_uc_req() 215 qp->s_next_psn++ & IPATH_PSN_MASK); ipath_make_uc_req() 221 qp->s_flags &= ~IPATH_S_BUSY; ipath_make_uc_req() 223 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_make_uc_req() 234 * @qp: the QP for this packet. 
241 int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_uc_rcv() 249 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); ipath_uc_rcv() 254 if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid)) ipath_uc_rcv() 288 if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) { ipath_uc_rcv() 293 qp->r_psn = psn; ipath_uc_rcv() 295 qp->r_state = OP(SEND_LAST); ipath_uc_rcv() 314 switch (qp->r_state) { ipath_uc_rcv() 348 if (qp->r_flags & IPATH_R_REUSE_SGE) { OP() 349 qp->r_flags &= ~IPATH_R_REUSE_SGE; OP() 350 qp->r_sge = qp->s_rdma_read_sge; OP() 351 } else if (!ipath_get_rwqe(qp, 0)) { 356 qp->s_rdma_read_sge = qp->r_sge; 357 qp->r_rcv_len = 0; 366 qp->r_flags |= IPATH_R_REUSE_SGE; OP() 370 qp->r_rcv_len += pmtu; 371 if (unlikely(qp->r_rcv_len > qp->r_len)) { 372 qp->r_flags |= IPATH_R_REUSE_SGE; 376 ipath_copy_sge(&qp->r_sge, data, pmtu); 398 qp->r_flags |= IPATH_R_REUSE_SGE; 404 wc.byte_len = tlen + qp->r_rcv_len; 405 if (unlikely(wc.byte_len > qp->r_len)) { 406 qp->r_flags |= IPATH_R_REUSE_SGE; 412 ipath_copy_sge(&qp->r_sge, data, tlen); 413 wc.wr_id = qp->r_wr_id; 415 wc.qp = &qp->ibqp; 416 wc.src_qp = qp->remote_qpn; 417 wc.slid = qp->remote_ah_attr.dlid; 418 wc.sl = qp->remote_ah_attr.sl; 420 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 437 qp->r_len = be32_to_cpu(reth->length); 438 qp->r_rcv_len = 0; 439 if (qp->r_len != 0) { 445 ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len, 453 qp->r_sge.sg_list = NULL; 454 qp->r_sge.sge.mr = NULL; 455 qp->r_sge.sge.vaddr = NULL; 456 qp->r_sge.sge.length = 0; 457 qp->r_sge.sge.sge_length = 0; 459 if (unlikely(!(qp->qp_access_flags & 475 qp->r_rcv_len += pmtu; 476 if (unlikely(qp->r_rcv_len > qp->r_len)) { 480 ipath_copy_sge(&qp->r_sge, data, pmtu); 505 if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) { 509 if (qp->r_flags & IPATH_R_REUSE_SGE) 510 qp->r_flags &= ~IPATH_R_REUSE_SGE; 511 else if (!ipath_get_rwqe(qp, 1)) { 515 wc.byte_len = qp->r_len; 531 if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) { 535 ipath_copy_sge(&qp->r_sge, data, tlen); 543 qp->r_psn++; 544 qp->r_state = opcode; 240 ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_uc_rcv() argument
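
The state machine in ipath_make_uc_req() above mostly reduces to one decision per packet: does the remaining length fit in the path MTU? The first packet of a request becomes *_ONLY or *_FIRST, later packets *_MIDDLE until the tail fits and becomes *_LAST (the RDMA WRITE arm mirrors the SEND arm). A hedged sketch of that choice with illustrative names:

    enum uc_opcode { SEND_FIRST, SEND_MIDDLE, SEND_LAST, SEND_ONLY };

    /* Pick the opcode for the next packet of a SEND work request with
     * 'remaining' payload bytes left and 'pmtu' bytes per packet. */
    static enum uc_opcode next_send_opcode(unsigned int remaining,
                                           unsigned int pmtu, int first)
    {
            if (first)
                    return remaining <= pmtu ? SEND_ONLY : SEND_FIRST;
            return remaining <= pmtu ? SEND_LAST : SEND_MIDDLE;
    }
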
H A D | ipath_qp.c | 203 * @qp: the QP 209 static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp, ipath_alloc_qpn() argument 218 qp->ibqp.qp_num = ret; ipath_alloc_qpn() 224 qp->next = qpt->table[ret]; ipath_alloc_qpn() 225 qpt->table[ret] = qp; ipath_alloc_qpn() 226 atomic_inc(&qp->refcount); ipath_alloc_qpn() 238 * @qp: the QP to remove 243 static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp) ipath_free_qp() argument 251 qpp = &qpt->table[qp->ibqp.qp_num % qpt->max]; ipath_free_qp() 253 if (q == qp) { ipath_free_qp() 254 *qpp = qp->next; ipath_free_qp() 255 qp->next = NULL; ipath_free_qp() 256 atomic_dec(&qp->refcount); ipath_free_qp() 274 struct ipath_qp *qp; ipath_free_all_qps() local 279 qp = qpt->table[n]; ipath_free_all_qps() 282 for (; qp; qp = qp->next) ipath_free_all_qps() 304 struct ipath_qp *qp; ipath_lookup_qpn() local 308 for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) { ipath_lookup_qpn() 309 if (qp->ibqp.qp_num == qpn) { ipath_lookup_qpn() 310 atomic_inc(&qp->refcount); ipath_lookup_qpn() 316 return qp; ipath_lookup_qpn() 321 * @qp: the QP to reset 324 static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type) ipath_reset_qp() argument 326 qp->remote_qpn = 0; ipath_reset_qp() 327 qp->qkey = 0; ipath_reset_qp() 328 qp->qp_access_flags = 0; ipath_reset_qp() 329 atomic_set(&qp->s_dma_busy, 0); ipath_reset_qp() 330 qp->s_flags &= IPATH_S_SIGNAL_REQ_WR; ipath_reset_qp() 331 qp->s_hdrwords = 0; ipath_reset_qp() 332 qp->s_wqe = NULL; ipath_reset_qp() 333 qp->s_pkt_delay = 0; ipath_reset_qp() 334 qp->s_draining = 0; ipath_reset_qp() 335 qp->s_psn = 0; ipath_reset_qp() 336 qp->r_psn = 0; ipath_reset_qp() 337 qp->r_msn = 0; ipath_reset_qp() 339 qp->s_state = IB_OPCODE_RC_SEND_LAST; ipath_reset_qp() 340 qp->r_state = IB_OPCODE_RC_SEND_LAST; ipath_reset_qp() 342 qp->s_state = IB_OPCODE_UC_SEND_LAST; ipath_reset_qp() 343 qp->r_state = IB_OPCODE_UC_SEND_LAST; ipath_reset_qp() 345 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; ipath_reset_qp() 346 qp->r_nak_state = 0; ipath_reset_qp() 347 qp->r_aflags = 0; ipath_reset_qp() 348 qp->r_flags = 0; ipath_reset_qp() 349 qp->s_rnr_timeout = 0; ipath_reset_qp() 350 qp->s_head = 0; ipath_reset_qp() 351 qp->s_tail = 0; ipath_reset_qp() 352 qp->s_cur = 0; ipath_reset_qp() 353 qp->s_last = 0; ipath_reset_qp() 354 qp->s_ssn = 1; ipath_reset_qp() 355 qp->s_lsn = 0; ipath_reset_qp() 356 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); ipath_reset_qp() 357 qp->r_head_ack_queue = 0; ipath_reset_qp() 358 qp->s_tail_ack_queue = 0; ipath_reset_qp() 359 qp->s_num_rd_atomic = 0; ipath_reset_qp() 360 if (qp->r_rq.wq) { ipath_reset_qp() 361 qp->r_rq.wq->head = 0; ipath_reset_qp() 362 qp->r_rq.wq->tail = 0; ipath_reset_qp() 368 * @qp: the QP to put into the error state 377 int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) ipath_error_qp() argument 379 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_error_qp() 383 if (qp->state == IB_QPS_ERR) ipath_error_qp() 386 qp->state = IB_QPS_ERR; ipath_error_qp() 389 if (!list_empty(&qp->timerwait)) ipath_error_qp() 390 list_del_init(&qp->timerwait); ipath_error_qp() 391 if (!list_empty(&qp->piowait)) ipath_error_qp() 392 list_del_init(&qp->piowait); ipath_error_qp() 396 if (qp->s_last != qp->s_head) ipath_error_qp() 397 ipath_schedule_send(qp); ipath_error_qp() 400 wc.qp = &qp->ibqp; ipath_error_qp() 403 if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) { ipath_error_qp() 404 wc.wr_id = qp->r_wr_id; ipath_error_qp() 406 
ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); ipath_error_qp() 410 if (qp->r_rq.wq) { ipath_error_qp() 415 spin_lock(&qp->r_rq.lock); ipath_error_qp() 418 wq = qp->r_rq.wq; ipath_error_qp() 420 if (head >= qp->r_rq.size) ipath_error_qp() 423 if (tail >= qp->r_rq.size) ipath_error_qp() 426 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; ipath_error_qp() 427 if (++tail >= qp->r_rq.size) ipath_error_qp() 429 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); ipath_error_qp() 433 spin_unlock(&qp->r_rq.lock); ipath_error_qp() 434 } else if (qp->ibqp.event_handler) ipath_error_qp() 454 struct ipath_qp *qp = to_iqp(ibqp); ipath_modify_qp() local 459 spin_lock_irq(&qp->s_lock); ipath_modify_qp() 462 attr->cur_qp_state : qp->state; ipath_modify_qp() 512 if (qp->state != IB_QPS_RESET) { ipath_modify_qp() 513 qp->state = IB_QPS_RESET; ipath_modify_qp() 515 if (!list_empty(&qp->timerwait)) ipath_modify_qp() 516 list_del_init(&qp->timerwait); ipath_modify_qp() 517 if (!list_empty(&qp->piowait)) ipath_modify_qp() 518 list_del_init(&qp->piowait); ipath_modify_qp() 520 qp->s_flags &= ~IPATH_S_ANY_WAIT; ipath_modify_qp() 521 spin_unlock_irq(&qp->s_lock); ipath_modify_qp() 523 tasklet_kill(&qp->s_task); ipath_modify_qp() 524 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); ipath_modify_qp() 525 spin_lock_irq(&qp->s_lock); ipath_modify_qp() 527 ipath_reset_qp(qp, ibqp->qp_type); ipath_modify_qp() 531 qp->s_draining = qp->s_last != qp->s_cur; ipath_modify_qp() 532 qp->state = new_state; ipath_modify_qp() 536 if (qp->ibqp.qp_type == IB_QPT_RC) ipath_modify_qp() 538 qp->state = new_state; ipath_modify_qp() 542 lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); ipath_modify_qp() 546 qp->state = new_state; ipath_modify_qp() 551 qp->s_pkey_index = attr->pkey_index; ipath_modify_qp() 554 qp->remote_qpn = attr->dest_qp_num; ipath_modify_qp() 557 qp->s_psn = qp->s_next_psn = attr->sq_psn; ipath_modify_qp() 558 qp->s_last_psn = qp->s_next_psn - 1; ipath_modify_qp() 562 qp->r_psn = attr->rq_psn; ipath_modify_qp() 565 qp->qp_access_flags = attr->qp_access_flags; ipath_modify_qp() 568 qp->remote_ah_attr = attr->ah_attr; ipath_modify_qp() 569 qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate); ipath_modify_qp() 573 qp->path_mtu = attr->path_mtu; ipath_modify_qp() 576 qp->s_retry = qp->s_retry_cnt = attr->retry_cnt; ipath_modify_qp() 579 qp->s_rnr_retry = attr->rnr_retry; ipath_modify_qp() 580 if (qp->s_rnr_retry > 7) ipath_modify_qp() 581 qp->s_rnr_retry = 7; ipath_modify_qp() 582 qp->s_rnr_retry_cnt = qp->s_rnr_retry; ipath_modify_qp() 586 qp->r_min_rnr_timer = attr->min_rnr_timer; ipath_modify_qp() 589 qp->timeout = attr->timeout; ipath_modify_qp() 592 qp->qkey = attr->qkey; ipath_modify_qp() 595 qp->r_max_rd_atomic = attr->max_dest_rd_atomic; ipath_modify_qp() 598 qp->s_max_rd_atomic = attr->max_rd_atomic; ipath_modify_qp() 600 spin_unlock_irq(&qp->s_lock); ipath_modify_qp() 605 ev.device = qp->ibqp.device; ipath_modify_qp() 606 ev.element.qp = &qp->ibqp; ipath_modify_qp() 608 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); ipath_modify_qp() 614 spin_unlock_irq(&qp->s_lock); ipath_modify_qp() 624 struct ipath_qp *qp = to_iqp(ibqp); ipath_query_qp() local 626 attr->qp_state = qp->state; ipath_query_qp() 628 attr->path_mtu = qp->path_mtu; ipath_query_qp() 630 attr->qkey = qp->qkey; ipath_query_qp() 631 attr->rq_psn = qp->r_psn; ipath_query_qp() 632 attr->sq_psn = qp->s_next_psn; ipath_query_qp() 633 attr->dest_qp_num = qp->remote_qpn; ipath_query_qp() 634 attr->qp_access_flags = 
qp->qp_access_flags; ipath_query_qp() 635 attr->cap.max_send_wr = qp->s_size - 1; ipath_query_qp() 636 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; ipath_query_qp() 637 attr->cap.max_send_sge = qp->s_max_sge; ipath_query_qp() 638 attr->cap.max_recv_sge = qp->r_rq.max_sge; ipath_query_qp() 640 attr->ah_attr = qp->remote_ah_attr; ipath_query_qp() 642 attr->pkey_index = qp->s_pkey_index; ipath_query_qp() 645 attr->sq_draining = qp->s_draining; ipath_query_qp() 646 attr->max_rd_atomic = qp->s_max_rd_atomic; ipath_query_qp() 647 attr->max_dest_rd_atomic = qp->r_max_rd_atomic; ipath_query_qp() 648 attr->min_rnr_timer = qp->r_min_rnr_timer; ipath_query_qp() 650 attr->timeout = qp->timeout; ipath_query_qp() 651 attr->retry_cnt = qp->s_retry_cnt; ipath_query_qp() 652 attr->rnr_retry = qp->s_rnr_retry_cnt; ipath_query_qp() 656 init_attr->event_handler = qp->ibqp.event_handler; ipath_query_qp() 657 init_attr->qp_context = qp->ibqp.qp_context; ipath_query_qp() 658 init_attr->send_cq = qp->ibqp.send_cq; ipath_query_qp() 659 init_attr->recv_cq = qp->ibqp.recv_cq; ipath_query_qp() 660 init_attr->srq = qp->ibqp.srq; ipath_query_qp() 662 if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ipath_query_qp() 666 init_attr->qp_type = qp->ibqp.qp_type; ipath_query_qp() 673 * @qp: the queue pair to compute the AETH for 677 __be32 ipath_compute_aeth(struct ipath_qp *qp) ipath_compute_aeth() argument 679 u32 aeth = qp->r_msn & IPATH_MSN_MASK; ipath_compute_aeth() 681 if (qp->ibqp.srq) { ipath_compute_aeth() 690 struct ipath_rwq *wq = qp->r_rq.wq; ipath_compute_aeth() 696 if (head >= qp->r_rq.size) ipath_compute_aeth() 699 if (tail >= qp->r_rq.size) ipath_compute_aeth() 708 credits += qp->r_rq.size; ipath_compute_aeth() 745 struct ipath_qp *qp; ipath_create_qp() local 794 sz = sizeof(*qp); ipath_create_qp() 800 sg_list_sz = sizeof(*qp->r_sg_list) * ipath_create_qp() 803 sg_list_sz = sizeof(*qp->r_sg_list) * ipath_create_qp() 805 qp = kmalloc(sz + sg_list_sz, GFP_KERNEL); ipath_create_qp() 806 if (!qp) { ipath_create_qp() 813 qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL); ipath_create_qp() 814 if (!qp->r_ud_sg_list) { ipath_create_qp() 819 qp->r_ud_sg_list = NULL; ipath_create_qp() 822 qp->r_rq.size = 0; ipath_create_qp() 823 qp->r_rq.max_sge = 0; ipath_create_qp() 824 qp->r_rq.wq = NULL; ipath_create_qp() 828 qp->r_rq.size = init_attr->cap.max_recv_wr + 1; ipath_create_qp() 829 qp->r_rq.max_sge = init_attr->cap.max_recv_sge; ipath_create_qp() 830 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + ipath_create_qp() 832 qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + ipath_create_qp() 833 qp->r_rq.size * sz); ipath_create_qp() 834 if (!qp->r_rq.wq) { ipath_create_qp() 841 * ib_create_qp() will initialize qp->ibqp ipath_create_qp() 842 * except for qp->ibqp.qp_num. 
ipath_create_qp() 844 spin_lock_init(&qp->s_lock); ipath_create_qp() 845 spin_lock_init(&qp->r_rq.lock); ipath_create_qp() 846 atomic_set(&qp->refcount, 0); ipath_create_qp() 847 init_waitqueue_head(&qp->wait); ipath_create_qp() 848 init_waitqueue_head(&qp->wait_dma); ipath_create_qp() 849 tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp); ipath_create_qp() 850 INIT_LIST_HEAD(&qp->piowait); ipath_create_qp() 851 INIT_LIST_HEAD(&qp->timerwait); ipath_create_qp() 852 qp->state = IB_QPS_RESET; ipath_create_qp() 853 qp->s_wq = swq; ipath_create_qp() 854 qp->s_size = init_attr->cap.max_send_wr + 1; ipath_create_qp() 855 qp->s_max_sge = init_attr->cap.max_send_sge; ipath_create_qp() 857 qp->s_flags = IPATH_S_SIGNAL_REQ_WR; ipath_create_qp() 859 qp->s_flags = 0; ipath_create_qp() 861 err = ipath_alloc_qpn(&dev->qp_table, qp, ipath_create_qp() 865 vfree(qp->r_rq.wq); ipath_create_qp() 868 qp->ip = NULL; ipath_create_qp() 869 qp->s_tx = NULL; ipath_create_qp() 870 ipath_reset_qp(qp, init_attr->qp_type); ipath_create_qp() 886 if (!qp->r_rq.wq) { ipath_create_qp() 897 qp->r_rq.size * sz; ipath_create_qp() 899 qp->ip = ipath_create_qp() 902 qp->r_rq.wq); ipath_create_qp() 903 if (!qp->ip) { ipath_create_qp() 908 err = ib_copy_to_udata(udata, &(qp->ip->offset), ipath_create_qp() 909 sizeof(qp->ip->offset)); ipath_create_qp() 927 if (qp->ip) { ipath_create_qp() 929 list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); ipath_create_qp() 933 ret = &qp->ibqp; ipath_create_qp() 937 if (qp->ip) ipath_create_qp() 938 kref_put(&qp->ip->ref, ipath_release_mmap_info); ipath_create_qp() 940 vfree(qp->r_rq.wq); ipath_create_qp() 941 ipath_free_qp(&dev->qp_table, qp); ipath_create_qp() 942 free_qpn(&dev->qp_table, qp->ibqp.qp_num); ipath_create_qp() 944 kfree(qp->r_ud_sg_list); ipath_create_qp() 946 kfree(qp); ipath_create_qp() 964 struct ipath_qp *qp = to_iqp(ibqp); ipath_destroy_qp() local 968 spin_lock_irq(&qp->s_lock); ipath_destroy_qp() 969 if (qp->state != IB_QPS_RESET) { ipath_destroy_qp() 970 qp->state = IB_QPS_RESET; ipath_destroy_qp() 972 if (!list_empty(&qp->timerwait)) ipath_destroy_qp() 973 list_del_init(&qp->timerwait); ipath_destroy_qp() 974 if (!list_empty(&qp->piowait)) ipath_destroy_qp() 975 list_del_init(&qp->piowait); ipath_destroy_qp() 977 qp->s_flags &= ~IPATH_S_ANY_WAIT; ipath_destroy_qp() 978 spin_unlock_irq(&qp->s_lock); ipath_destroy_qp() 980 tasklet_kill(&qp->s_task); ipath_destroy_qp() 981 wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); ipath_destroy_qp() 983 spin_unlock_irq(&qp->s_lock); ipath_destroy_qp() 985 ipath_free_qp(&dev->qp_table, qp); ipath_destroy_qp() 987 if (qp->s_tx) { ipath_destroy_qp() 988 atomic_dec(&qp->refcount); ipath_destroy_qp() 989 if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF) ipath_destroy_qp() 990 kfree(qp->s_tx->txreq.map_addr); ipath_destroy_qp() 992 list_add(&qp->s_tx->txreq.list, &dev->txreq_free); ipath_destroy_qp() 994 qp->s_tx = NULL; ipath_destroy_qp() 997 wait_event(qp->wait, !atomic_read(&qp->refcount)); ipath_destroy_qp() 1000 free_qpn(&dev->qp_table, qp->ibqp.qp_num); ipath_destroy_qp() 1005 if (qp->ip) ipath_destroy_qp() 1006 kref_put(&qp->ip->ref, ipath_release_mmap_info); ipath_destroy_qp() 1008 vfree(qp->r_rq.wq); ipath_destroy_qp() 1009 kfree(qp->r_ud_sg_list); ipath_destroy_qp() 1010 vfree(qp->s_wq); ipath_destroy_qp() 1011 kfree(qp); ipath_destroy_qp() 1050 * @qp: the qp who's send work queue to flush 1055 void ipath_get_credit(struct ipath_qp *qp, u32 aeth) ipath_get_credit() argument 1065 qp->s_lsn = (u32) -1; 
ipath_get_credit() 1066 else if (qp->s_lsn != (u32) -1) { ipath_get_credit() 1069 if (ipath_cmp24(credit, qp->s_lsn) > 0) ipath_get_credit() 1070 qp->s_lsn = credit; ipath_get_credit() 1074 if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) && ipath_get_credit() 1075 qp->s_cur != qp->s_head && ipath_get_credit() 1076 (qp->s_lsn == (u32) -1 || ipath_get_credit() 1077 ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn, ipath_get_credit() 1078 qp->s_lsn + 1) <= 0)) ipath_get_credit() 1079 ipath_schedule_send(qp); ipath_get_credit()
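
ipath_compute_aeth() above advertises how many receive WQEs remain by measuring the head-to-tail distance of the circular receive queue; the `credits += qp->r_rq.size` line visible in the listing is the wraparound correction. As a standalone computation:

    /* Free receive entries between 'tail' (consumer) and 'head'
     * (producer) in a ring of 'size' slots; both indices are already
     * clamped to [0, size). */
    static unsigned int rq_credits(unsigned int head, unsigned int tail,
                                   unsigned int size)
    {
            int credits = (int)(head - tail);

            if (credits < 0)
                    credits += size;        /* producer wrapped around */
            return credits;
    }
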
H A D | ipath_ruc.c | 80 * @qp: the QP 87 void ipath_insert_rnr_queue(struct ipath_qp *qp) ipath_insert_rnr_queue() argument 89 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_insert_rnr_queue() 94 list_add(&qp->timerwait, &dev->rnrwait); ipath_insert_rnr_queue() 100 while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) { ipath_insert_rnr_queue() 101 qp->s_rnr_timeout -= nqp->s_rnr_timeout; ipath_insert_rnr_queue() 111 nqp->s_rnr_timeout -= qp->s_rnr_timeout; ipath_insert_rnr_queue() 112 list_add(&qp->timerwait, l); ipath_insert_rnr_queue() 119 * @qp: the QP 123 int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, ipath_init_sge() argument 134 if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge, ipath_init_sge() 149 wc.qp = &qp->ibqp; ipath_init_sge() 151 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); ipath_init_sge() 159 * @qp: the QP 160 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge 166 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only) ipath_get_rwqe() argument 177 if (qp->ibqp.srq) { ipath_get_rwqe() 178 srq = to_isrq(qp->ibqp.srq); ipath_get_rwqe() 184 rq = &qp->r_rq; ipath_get_rwqe() 188 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { ipath_get_rwqe() 210 qp->r_sge.sg_list = qp->r_sg_list; ipath_get_rwqe() 211 } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge)); ipath_get_rwqe() 212 qp->r_wr_id = wqe->wr_id; ipath_get_rwqe() 216 set_bit(IPATH_R_WRID_VALID, &qp->r_aflags); ipath_get_rwqe() 236 ev.device = qp->ibqp.device; ipath_get_rwqe() 237 ev.element.srq = qp->ibqp.srq; ipath_get_rwqe() 263 struct ipath_qp *qp; ipath_ruc_loopback() local 276 qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn); ipath_ruc_loopback() 312 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { ipath_ruc_loopback() 338 if (!ipath_get_rwqe(qp, 0)) ipath_ruc_loopback() 343 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) ipath_ruc_loopback() 347 if (!ipath_get_rwqe(qp, 1)) ipath_ruc_loopback() 351 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) ipath_ruc_loopback() 355 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length, ipath_ruc_loopback() 363 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) ipath_ruc_loopback() 365 if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length, ipath_ruc_loopback() 370 qp->r_sge.sge = wqe->sg_list[0]; ipath_ruc_loopback() 371 qp->r_sge.sg_list = wqe->sg_list + 1; ipath_ruc_loopback() 372 qp->r_sge.num_sge = wqe->wr.num_sge; ipath_ruc_loopback() 377 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) ipath_ruc_loopback() 379 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64), ipath_ruc_loopback() 385 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; ipath_ruc_loopback() 390 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, ipath_ruc_loopback() 408 ipath_copy_sge(&qp->r_sge, sge->vaddr, len); ipath_ruc_loopback() 429 if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) ipath_ruc_loopback() 436 wc.wr_id = qp->r_wr_id; ipath_ruc_loopback() 439 wc.qp = &qp->ibqp; ipath_ruc_loopback() 440 wc.src_qp = qp->remote_qpn; ipath_ruc_loopback() 441 wc.slid = qp->remote_ah_attr.dlid; ipath_ruc_loopback() 442 wc.sl = qp->remote_ah_attr.sl; ipath_ruc_loopback() 445 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, ipath_ruc_loopback() 457 if (qp->ibqp.qp_type == IB_QPT_UC) ipath_ruc_loopback() 474 sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer]; ipath_ruc_loopback() 488 ipath_rc_error(qp, wc.status); ipath_ruc_loopback() 502 ev.element.qp = 
&sqp->ibqp; ipath_ruc_loopback() 513 if (qp && atomic_dec_and_test(&qp->refcount)) ipath_ruc_loopback() 514 wake_up(&qp->wait); ipath_ruc_loopback() 517 static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp) want_buffer() argument 520 qp->ibqp.qp_type == IB_QPT_SMI) { want_buffer() 534 * @qp: the QP that caused the problem 541 static int ipath_no_bufs_available(struct ipath_qp *qp, ipath_no_bufs_available() argument 553 spin_lock_irqsave(&qp->s_lock, flags); ipath_no_bufs_available() 554 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) { ipath_no_bufs_available() 556 qp->s_flags |= IPATH_S_WAITING; ipath_no_bufs_available() 557 qp->s_flags &= ~IPATH_S_BUSY; ipath_no_bufs_available() 559 if (list_empty(&qp->piowait)) ipath_no_bufs_available() 560 list_add_tail(&qp->piowait, &dev->piowait); ipath_no_bufs_available() 564 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_no_bufs_available() 566 want_buffer(dev->dd, qp); ipath_no_bufs_available() 600 void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp, ipath_make_ruc_header() argument 609 extra_bytes = -qp->s_cur_size & 3; ipath_make_ruc_header() 610 nwords = (qp->s_cur_size + extra_bytes) >> 2; ipath_make_ruc_header() 612 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { ipath_make_ruc_header() 613 qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh, ipath_make_ruc_header() 614 &qp->remote_ah_attr.grh, ipath_make_ruc_header() 615 qp->s_hdrwords, nwords); ipath_make_ruc_header() 618 lrh0 |= qp->remote_ah_attr.sl << 4; ipath_make_ruc_header() 619 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); ipath_make_ruc_header() 620 qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); ipath_make_ruc_header() 621 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); ipath_make_ruc_header() 622 qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid | ipath_make_ruc_header() 623 qp->remote_ah_attr.src_path_bits); ipath_make_ruc_header() 624 bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index); ipath_make_ruc_header() 627 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); ipath_make_ruc_header() 641 struct ipath_qp *qp = (struct ipath_qp *)data; ipath_do_send() local 642 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_do_send() 643 int (*make_req)(struct ipath_qp *qp); ipath_do_send() 646 if ((qp->ibqp.qp_type == IB_QPT_RC || ipath_do_send() 647 qp->ibqp.qp_type == IB_QPT_UC) && ipath_do_send() 648 qp->remote_ah_attr.dlid == dev->dd->ipath_lid) { ipath_do_send() 649 ipath_ruc_loopback(qp); ipath_do_send() 653 if (qp->ibqp.qp_type == IB_QPT_RC) ipath_do_send() 655 else if (qp->ibqp.qp_type == IB_QPT_UC) ipath_do_send() 660 spin_lock_irqsave(&qp->s_lock, flags); ipath_do_send() 663 if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) || ipath_do_send() 664 !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) { ipath_do_send() 665 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_do_send() 669 qp->s_flags |= IPATH_S_BUSY; ipath_do_send() 671 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_do_send() 675 if (qp->s_hdrwords != 0) { ipath_do_send() 680 if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords, ipath_do_send() 681 qp->s_cur_sge, qp->s_cur_size)) { ipath_do_send() 682 if (ipath_no_bufs_available(qp, dev)) ipath_do_send() 687 qp->s_hdrwords = 0; ipath_do_send() 690 if (make_req(qp)) ipath_do_send() 699 void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe, ipath_send_complete() argument 704 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) 
ipath_send_complete() 708 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) || ipath_send_complete() 717 wc.qp = &qp->ibqp; ipath_send_complete() 720 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, ipath_send_complete() 724 old_last = last = qp->s_last; ipath_send_complete() 725 if (++last >= qp->s_size) ipath_send_complete() 727 qp->s_last = last; ipath_send_complete() 728 if (qp->s_cur == old_last) ipath_send_complete() 729 qp->s_cur = last; ipath_send_complete() 730 if (qp->s_tail == old_last) ipath_send_complete() 731 qp->s_tail = last; ipath_send_complete() 732 if (qp->state == IB_QPS_SQD && last == qp->s_cur) ipath_send_complete() 733 qp->s_draining = 0; ipath_send_complete()
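
One detail of ipath_insert_rnr_queue() above deserves unpacking: dev->rnrwait stores timeouts as deltas, so each queued QP holds only the ticks beyond its predecessor, and the periodic timer needs to decrement just the head entry. A self-contained sketch of delta-list insertion over a hypothetical entry type:

    #include <linux/list.h>

    struct rnr_entry {
            struct list_head node;
            unsigned int timeout;   /* ticks relative to previous entry */
    };

    /* Insert 'new' keeping the list sorted by absolute expiry: consume
     * the deltas of everything that fires earlier, then charge the
     * leftover to the entry that now follows us. */
    static void rnr_insert(struct list_head *head, struct rnr_entry *new)
    {
            struct rnr_entry *cur;

            list_for_each_entry(cur, head, node) {
                    if (new->timeout < cur->timeout) {
                            cur->timeout -= new->timeout;
                            list_add_tail(&new->node, &cur->node); /* before cur */
                            return;
                    }
                    new->timeout -= cur->timeout;
            }
            list_add_tail(&new->node, head);        /* latest expiry: append */
    }
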
H A D | ipath_ud.c | 53 struct ipath_qp *qp; ipath_ud_loopback() local 68 qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn); ipath_ud_loopback() 69 if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { ipath_ud_loopback() 79 if (unlikely(qp->ibqp.qp_num && ipath_ud_loopback() 81 sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) { ipath_ud_loopback() 107 if (qp->ibqp.srq) { ipath_ud_loopback() 108 srq = to_isrq(qp->ibqp.srq); ipath_ud_loopback() 114 rq = &qp->r_rq; ipath_ud_loopback() 134 rsge.sg_list = qp->r_ud_sg_list; ipath_ud_loopback() 135 if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) { ipath_ud_loopback() 169 ev.device = qp->ibqp.device; ipath_ud_loopback() 170 ev.element.srq = qp->ibqp.srq; ipath_ud_loopback() 215 wc.qp = &qp->ibqp; ipath_ud_loopback() 227 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, ipath_ud_loopback() 230 if (atomic_dec_and_test(&qp->refcount)) ipath_ud_loopback() 231 wake_up(&qp->wait); ipath_ud_loopback() 237 * @qp: the QP 241 int ipath_make_ud_req(struct ipath_qp *qp) ipath_make_ud_req() argument 243 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_make_ud_req() 256 spin_lock_irqsave(&qp->s_lock, flags); ipath_make_ud_req() 258 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) { ipath_make_ud_req() 259 if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) ipath_make_ud_req() 262 if (qp->s_last == qp->s_head) ipath_make_ud_req() 265 if (atomic_read(&qp->s_dma_busy)) { ipath_make_ud_req() 266 qp->s_flags |= IPATH_S_WAIT_DMA; ipath_make_ud_req() 269 wqe = get_swqe_ptr(qp, qp->s_last); ipath_make_ud_req() 270 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); ipath_make_ud_req() 274 if (qp->s_cur == qp->s_head) ipath_make_ud_req() 277 wqe = get_swqe_ptr(qp, qp->s_cur); ipath_make_ud_req() 278 next_cur = qp->s_cur + 1; ipath_make_ud_req() 279 if (next_cur >= qp->s_size) ipath_make_ud_req() 300 if (atomic_read(&qp->s_dma_busy)) { ipath_make_ud_req() 301 qp->s_flags |= IPATH_S_WAIT_DMA; ipath_make_ud_req() 304 qp->s_cur = next_cur; ipath_make_ud_req() 305 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_make_ud_req() 306 ipath_ud_loopback(qp, wqe); ipath_make_ud_req() 307 spin_lock_irqsave(&qp->s_lock, flags); ipath_make_ud_req() 308 ipath_send_complete(qp, wqe, IB_WC_SUCCESS); ipath_make_ud_req() 313 qp->s_cur = next_cur; ipath_make_ud_req() 318 qp->s_hdrwords = 7; ipath_make_ud_req() 319 qp->s_cur_size = wqe->length; ipath_make_ud_req() 320 qp->s_cur_sge = &qp->s_sge; ipath_make_ud_req() 321 qp->s_dmult = ah_attr->static_rate; ipath_make_ud_req() 322 qp->s_wqe = wqe; ipath_make_ud_req() 323 qp->s_sge.sge = wqe->sg_list[0]; ipath_make_ud_req() 324 qp->s_sge.sg_list = wqe->sg_list + 1; ipath_make_ud_req() 325 qp->s_sge.num_sge = wqe->wr.num_sge; ipath_make_ud_req() 329 qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh, ipath_make_ud_req() 331 qp->s_hdrwords, nwords); ipath_make_ud_req() 333 ohdr = &qp->s_hdr.u.l.oth; ipath_make_ud_req() 341 ohdr = &qp->s_hdr.u.oth; ipath_make_ud_req() 344 qp->s_hdrwords++; ipath_make_ud_req() 350 if (qp->ibqp.qp_type == IB_QPT_SMI) ipath_make_ud_req() 352 qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); ipath_make_ud_req() 353 qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ ipath_make_ud_req() 354 qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + ipath_make_ud_req() 360 qp->s_hdr.lrh[3] = cpu_to_be16(lid); ipath_make_ud_req() 362 qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE; ipath_make_ud_req() 366 bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? 
IPATH_DEFAULT_P_KEY : ipath_make_ud_req() 367 ipath_get_pkey(dev->dd, qp->s_pkey_index); ipath_make_ud_req() 376 ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK); ipath_make_ud_req() 382 qp->qkey : wqe->wr.wr.ud.remote_qkey); ipath_make_ud_req() 383 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); ipath_make_ud_req() 390 qp->s_flags &= ~IPATH_S_BUSY; ipath_make_ud_req() 392 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_make_ud_req() 403 * @qp: the QP the packet came on 410 int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_ud_rcv() 453 if (qp->ibqp.qp_num) { ipath_ud_rcv() 459 if (unlikely(qkey != qp->qkey)) { ipath_ud_rcv() 480 if (qp->ibqp.qp_num > 1 && ipath_ud_rcv() 507 if (unlikely((qp->ibqp.qp_num == 0 && ipath_ud_rcv() 510 (qp->ibqp.qp_num == 1 && ipath_ud_rcv() 526 if (qp->r_flags & IPATH_R_REUSE_SGE) ipath_ud_rcv() 527 qp->r_flags &= ~IPATH_R_REUSE_SGE; ipath_ud_rcv() 528 else if (!ipath_get_rwqe(qp, 0)) { ipath_ud_rcv() 535 if (qp->ibqp.qp_num == 0) ipath_ud_rcv() 542 if (wc.byte_len > qp->r_len) { ipath_ud_rcv() 543 qp->r_flags |= IPATH_R_REUSE_SGE; ipath_ud_rcv() 548 ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh, ipath_ud_rcv() 552 ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); ipath_ud_rcv() 553 ipath_copy_sge(&qp->r_sge, data, ipath_ud_rcv() 555 if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) ipath_ud_rcv() 557 wc.wr_id = qp->r_wr_id; ipath_ud_rcv() 561 wc.qp = &qp->ibqp; ipath_ud_rcv() 575 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, ipath_ud_rcv() 409 ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_ud_rcv() argument
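
The Q_Key test in ipath_ud_loopback() above encodes an InfiniBand convention: when the Q_Key carried in a work request has its high-order bit set, the sender's own QP Q_Key is used instead of the supplied value. Distilled:

    #include <linux/types.h>

    /* Resolve the effective Q_Key for a UD send: a set high bit selects
     * the sending QP's configured Q_Key (a "controlled" Q_Key). */
    static u32 effective_qkey(u32 wr_qkey, u32 sqp_qkey)
    {
            return (wr_qkey & 0x80000000u) ? sqp_qkey : wr_qkey;
    }
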
H A D | ipath_verbs.c | 333 * @qp: the QP to post on 336 static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr) ipath_post_one_send() argument 345 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; ipath_post_one_send() 347 spin_lock_irqsave(&qp->s_lock, flags); ipath_post_one_send() 349 if (qp->ibqp.qp_type != IB_QPT_SMI && ipath_post_one_send() 356 if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) ipath_post_one_send() 360 if (wr->num_sge > qp->s_max_sge) ipath_post_one_send() 368 if (qp->ibqp.qp_type == IB_QPT_UC) { ipath_post_one_send() 371 } else if (qp->ibqp.qp_type == IB_QPT_UD) { ipath_post_one_send() 377 if (qp->ibqp.pd != wr->wr.ud.ah->pd) ipath_post_one_send() 386 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) ipath_post_one_send() 389 next = qp->s_head + 1; ipath_post_one_send() 390 if (next >= qp->s_size) ipath_post_one_send() 392 if (next == qp->s_last) { ipath_post_one_send() 397 wqe = get_swqe_ptr(qp, qp->s_head); ipath_post_one_send() 409 ok = ipath_lkey_ok(qp, &wqe->sg_list[j], ipath_post_one_send() 418 if (qp->ibqp.qp_type == IB_QPT_UC || ipath_post_one_send() 419 qp->ibqp.qp_type == IB_QPT_RC) { ipath_post_one_send() 422 } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu) ipath_post_one_send() 424 wqe->ssn = qp->s_ssn++; ipath_post_one_send() 425 qp->s_head = next; ipath_post_one_send() 433 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_post_one_send() 448 struct ipath_qp *qp = to_iqp(ibqp); ipath_post_send() local 452 err = ipath_post_one_send(qp, wr); ipath_post_send() 460 ipath_do_send((unsigned long) qp); ipath_post_send() 477 struct ipath_qp *qp = to_iqp(ibqp); ipath_post_receive() local 478 struct ipath_rwq *wq = qp->r_rq.wq; ipath_post_receive() 483 if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) { ipath_post_receive() 494 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { ipath_post_receive() 500 spin_lock_irqsave(&qp->r_rq.lock, flags); ipath_post_receive() 502 if (next >= qp->r_rq.size) ipath_post_receive() 505 spin_unlock_irqrestore(&qp->r_rq.lock, flags); ipath_post_receive() 511 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); ipath_post_receive() 519 spin_unlock_irqrestore(&qp->r_rq.lock, flags); ipath_post_receive() 534 * @qp: the QP the packet came on 542 void *data, u32 tlen, struct ipath_qp *qp) ipath_qp_rcv() 545 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { ipath_qp_rcv() 550 switch (qp->ibqp.qp_type) { ipath_qp_rcv() 557 ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp); ipath_qp_rcv() 561 ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp); ipath_qp_rcv() 565 ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp); ipath_qp_rcv() 588 struct ipath_qp *qp; ipath_ib_rcv() local 644 ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp); ipath_ib_rcv() 652 qp = ipath_lookup_qpn(&dev->qp_table, qp_num); ipath_ib_rcv() 653 if (qp) { ipath_ib_rcv() 656 tlen, qp); ipath_ib_rcv() 661 if (atomic_dec_and_test(&qp->refcount)) ipath_ib_rcv() 662 wake_up(&qp->wait); ipath_ib_rcv() 682 struct ipath_qp *qp; ipath_ib_timer() local 695 qp = list_entry(last->next, struct ipath_qp, timerwait); ipath_ib_timer() 696 list_del_init(&qp->timerwait); ipath_ib_timer() 697 qp->timer_next = resend; ipath_ib_timer() 698 resend = qp; ipath_ib_timer() 699 atomic_inc(&qp->refcount); ipath_ib_timer() 703 qp = list_entry(last->next, struct ipath_qp, timerwait); ipath_ib_timer() 704 if (--qp->s_rnr_timeout == 0) { ipath_ib_timer() 706 list_del_init(&qp->timerwait); ipath_ib_timer() 707 qp->timer_next = 
rnr; ipath_ib_timer() 708 rnr = qp; ipath_ib_timer() 709 atomic_inc(&qp->refcount); ipath_ib_timer() 712 qp = list_entry(last->next, struct ipath_qp, ipath_ib_timer() 714 } while (qp->s_rnr_timeout == 0); ipath_ib_timer() 750 qp = resend; ipath_ib_timer() 751 resend = qp->timer_next; ipath_ib_timer() 753 spin_lock_irqsave(&qp->s_lock, flags); ipath_ib_timer() 754 if (qp->s_last != qp->s_tail && ipath_ib_timer() 755 ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) { ipath_ib_timer() 757 ipath_restart_rc(qp, qp->s_last_psn + 1); ipath_ib_timer() 759 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_ib_timer() 762 if (atomic_dec_and_test(&qp->refcount)) ipath_ib_timer() 763 wake_up(&qp->wait); ipath_ib_timer() 766 qp = rnr; ipath_ib_timer() 767 rnr = qp->timer_next; ipath_ib_timer() 769 spin_lock_irqsave(&qp->s_lock, flags); ipath_ib_timer() 770 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ipath_ib_timer() 771 ipath_schedule_send(qp); ipath_ib_timer() 772 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_ib_timer() 775 if (atomic_dec_and_test(&qp->refcount)) ipath_ib_timer() 776 wake_up(&qp->wait); ipath_ib_timer() 1031 struct ipath_qp *qp = tx->qp; sdma_complete() local 1032 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); sdma_complete() 1037 if (atomic_dec_and_test(&qp->s_dma_busy)) { sdma_complete() 1038 spin_lock_irqsave(&qp->s_lock, flags); sdma_complete() 1040 ipath_send_complete(qp, tx->wqe, ibs); sdma_complete() 1041 if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND && sdma_complete() 1042 qp->s_last != qp->s_head) || sdma_complete() 1043 (qp->s_flags & IPATH_S_WAIT_DMA)) sdma_complete() 1044 ipath_schedule_send(qp); sdma_complete() 1045 spin_unlock_irqrestore(&qp->s_lock, flags); sdma_complete() 1046 wake_up(&qp->wait_dma); sdma_complete() 1048 spin_lock_irqsave(&qp->s_lock, flags); sdma_complete() 1049 ipath_send_complete(qp, tx->wqe, ibs); sdma_complete() 1050 spin_unlock_irqrestore(&qp->s_lock, flags); sdma_complete() 1057 if (atomic_dec_and_test(&qp->refcount)) sdma_complete() 1058 wake_up(&qp->wait); sdma_complete() 1061 static void decrement_dma_busy(struct ipath_qp *qp) decrement_dma_busy() argument 1065 if (atomic_dec_and_test(&qp->s_dma_busy)) { decrement_dma_busy() 1066 spin_lock_irqsave(&qp->s_lock, flags); decrement_dma_busy() 1067 if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND && decrement_dma_busy() 1068 qp->s_last != qp->s_head) || decrement_dma_busy() 1069 (qp->s_flags & IPATH_S_WAIT_DMA)) decrement_dma_busy() 1070 ipath_schedule_send(qp); decrement_dma_busy() 1071 spin_unlock_irqrestore(&qp->s_lock, flags); decrement_dma_busy() 1072 wake_up(&qp->wait_dma); decrement_dma_busy() 1097 static int ipath_verbs_send_dma(struct ipath_qp *qp, ipath_verbs_send_dma() argument 1102 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); ipath_verbs_send_dma() 1110 tx = qp->s_tx; ipath_verbs_send_dma() 1112 qp->s_tx = NULL; ipath_verbs_send_dma() 1114 atomic_inc(&qp->s_dma_busy); ipath_verbs_send_dma() 1117 qp->s_tx = tx; ipath_verbs_send_dma() 1118 decrement_dma_busy(qp); ipath_verbs_send_dma() 1134 control = qp->s_pkt_delay; ipath_verbs_send_dma() 1135 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult); ipath_verbs_send_dma() 1137 tx->qp = qp; ipath_verbs_send_dma() 1138 atomic_inc(&qp->refcount); ipath_verbs_send_dma() 1139 tx->wqe = qp->s_wqe; ipath_verbs_send_dma() 1170 atomic_inc(&qp->s_dma_busy); ipath_verbs_send_dma() 1176 qp->s_tx = tx; ipath_verbs_send_dma() 1177 decrement_dma_busy(qp); ipath_verbs_send_dma() 1198 
atomic_inc(&qp->s_dma_busy); ipath_verbs_send_dma() 1208 qp->s_tx = tx; ipath_verbs_send_dma() 1209 decrement_dma_busy(qp); ipath_verbs_send_dma() 1215 if (atomic_dec_and_test(&qp->refcount)) ipath_verbs_send_dma() 1216 wake_up(&qp->wait); ipath_verbs_send_dma() 1222 static int ipath_verbs_send_pio(struct ipath_qp *qp, ipath_verbs_send_pio() argument 1227 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; ipath_verbs_send_pio() 1246 control = qp->s_pkt_delay; ipath_verbs_send_pio() 1247 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult); ipath_verbs_send_pio() 1304 if (qp->s_wqe) { ipath_verbs_send_pio() 1305 spin_lock_irqsave(&qp->s_lock, flags); ipath_verbs_send_pio() 1306 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); ipath_verbs_send_pio() 1307 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_verbs_send_pio() 1316 * @qp: the QP to send on 1322 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr, ipath_verbs_send() argument 1325 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; ipath_verbs_send() 1341 if (qp->ibqp.qp_type == IB_QPT_SMI || ipath_verbs_send() 1343 ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len, ipath_verbs_send() 1346 ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len, ipath_verbs_send() 1461 struct ipath_qp *qp; ipath_ib_piobufavail() local 1472 qp = list_entry(list->next, struct ipath_qp, piowait); ipath_ib_piobufavail() 1473 list_del_init(&qp->piowait); ipath_ib_piobufavail() 1474 qp->pio_next = qplist; ipath_ib_piobufavail() 1475 qplist = qp; ipath_ib_piobufavail() 1476 atomic_inc(&qp->refcount); ipath_ib_piobufavail() 1481 qp = qplist; ipath_ib_piobufavail() 1482 qplist = qp->pio_next; ipath_ib_piobufavail() 1484 spin_lock_irqsave(&qp->s_lock, flags); ipath_ib_piobufavail() 1485 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ipath_ib_piobufavail() 1486 ipath_schedule_send(qp); ipath_ib_piobufavail() 1487 spin_unlock_irqrestore(&qp->s_lock, flags); ipath_ib_piobufavail() 1490 if (atomic_dec_and_test(&qp->refcount)) ipath_ib_piobufavail() 1491 wake_up(&qp->wait); ipath_ib_piobufavail() 540 ipath_qp_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int has_grh, void *data, u32 tlen, struct ipath_qp *qp) ipath_qp_rcv() argument
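
A pattern worth calling out from the send and interrupt paths above: every asynchronous holder of a QP (timer lists, PIO-wait lists, SDMA completions) pins it with atomic_inc(&qp->refcount) and drops it with a dec-and-test that wakes qp->wait, while ipath_destroy_qp() sleeps until the count drains to zero. A minimal sketch of the pattern, using a hypothetical stand-in type:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    struct pinned_qp {                      /* hypothetical stand-in */
            atomic_t refcount;
            wait_queue_head_t wait;
    };

    /* Drop one reference; the last dropper wakes any waiting destroyer. */
    static void qp_put(struct pinned_qp *qp)
    {
            if (atomic_dec_and_test(&qp->refcount))
                    wake_up(&qp->wait);
    }

    /* Teardown side: sleep until every asynchronous user has let go. */
    static void qp_quiesce(struct pinned_qp *qp)
    {
            wait_event(qp->wait, !atomic_read(&qp->refcount));
    }
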
H A D | ipath_verbs_mcast.c |
   51  * @qp: the QP to link
   53  static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)   ipath_mcast_qp_alloc() argument
   61  mqp->qp = qp;                                    ipath_mcast_qp_alloc()
   62  atomic_inc(&qp->refcount);                       ipath_mcast_qp_alloc()
   70  struct ipath_qp *qp = mqp->qp;                   ipath_mcast_qp_free() local
   73  if (atomic_dec_and_test(&qp->refcount))          ipath_mcast_qp_free()
   74  wake_up(&qp->wait);                              ipath_mcast_qp_free()
  193  if (p->qp == mqp->qp) {                          ipath_mcast_add()
  238  struct ipath_qp *qp = to_iqp(ibqp);              ipath_multicast_attach() local
  253  mqp = ipath_mcast_qp_alloc(qp);                  ipath_multicast_attach()
  287  struct ipath_qp *qp = to_iqp(ibqp);              ipath_multicast_detach() local
  319  if (p->qp != qp)                                 ipath_multicast_detach()
H A D | ipath_keys.c |
  121  int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,         ipath_lkey_ok() argument
  124  struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;    ipath_lkey_ok()
  136  struct ipath_pd *pd = to_ipd(qp->ibqp.pd);                             ipath_lkey_ok()
  151  qp->ibqp.pd != mr->pd)) {                                              ipath_lkey_ok()
  199  int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,     ipath_rkey_ok() argument
  202  struct ipath_ibdev *dev = to_idev(qp->ibqp.device);                    ipath_rkey_ok()
  216  struct ipath_pd *pd = to_ipd(qp->ibqp.pd);                             ipath_rkey_ok()
  234  qp->ibqp.pd != mr->pd)) {                                              ipath_rkey_ok()
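
Both ipath_lkey_ok() and ipath_rkey_ok() reduce to the same question: does [vaddr, vaddr + len) sit inside the memory region without wrapping the address space? A sketch of that core test (field names illustrative; the real helpers also validate the key itself, PD ownership, and access flags):

    #include <linux/types.h>

    /* Core bounds check: the requested range must not wrap and must
     * fall entirely inside the registered region. */
    static int range_ok(u64 vaddr, u64 len, u64 mr_start, u64 mr_len)
    {
            if (vaddr + len < vaddr)        /* address wraparound */
                    return 0;
            return vaddr >= mr_start && vaddr + len <= mr_start + mr_len;
    }
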
H A D | ipath_verbs.h | 158 struct ipath_qp *qp; member in struct:ipath_mcast_qp 277 * in qp->s_max_sge. 291 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). 480 static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp, get_swqe_ptr() argument 483 return (struct ipath_swqe *)((char *)qp->s_wq + get_swqe_ptr() 485 qp->s_max_sge * get_swqe_ptr() 646 struct ipath_qp *qp; member in struct:ipath_verbs_txreq 693 static inline void ipath_schedule_send(struct ipath_qp *qp) ipath_schedule_send() argument 695 if (qp->s_flags & IPATH_S_ANY_WAIT) ipath_schedule_send() 696 qp->s_flags &= ~IPATH_S_ANY_WAIT; ipath_schedule_send() 697 if (!(qp->s_flags & IPATH_S_BUSY)) ipath_schedule_send() 698 tasklet_hi_schedule(&qp->s_task); ipath_schedule_send() 732 __be32 ipath_compute_aeth(struct ipath_qp *qp); 742 int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err); 754 void ipath_get_credit(struct ipath_qp *qp, u32 aeth); 758 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr, 766 int has_grh, void *data, u32 tlen, struct ipath_qp *qp); 769 int has_grh, void *data, u32 tlen, struct ipath_qp *qp); 771 void ipath_restart_rc(struct ipath_qp *qp, u32 psn); 773 void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err); 775 int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr); 778 int has_grh, void *data, u32 tlen, struct ipath_qp *qp); 785 int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, 788 int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, 855 void ipath_insert_rnr_queue(struct ipath_qp *qp); 857 int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, 860 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only); 865 void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp, 871 void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe, 874 int ipath_make_rc_req(struct ipath_qp *qp); 876 int ipath_make_uc_req(struct ipath_qp *qp); 878 int ipath_make_ud_req(struct ipath_qp *qp);
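
get_swqe_ptr() above is plain stride arithmetic: a send WQE is a fixed header followed by up to s_max_sge scatter/gather entries, so the queue behaves as an array with a runtime-computed element size. A sketch with simplified, illustrative types:

    #include <stddef.h>

    struct swqe_hdr { unsigned long wr_id; };           /* fixed per-WQE header */
    struct sge { void *vaddr; unsigned int length; };   /* one S/G entry */

    /* Slot n of a send queue whose entries carry 'max_sge' S/G slots:
     * base + n * (header + max_sge entries). */
    static void *swqe_at(void *s_wq, unsigned int n, unsigned int max_sge)
    {
            size_t stride = sizeof(struct swqe_hdr) +
                            (size_t)max_sge * sizeof(struct sge);

            return (char *)s_wq + (size_t)n * stride;
    }
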
H A D | ipath_cq.c |
   46  * This may be called with qp->s_lock held.
   87  wc->uqueue[head].qp_num = entry->qp->qp_num;     ipath_cq_enter()
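
ipath_cq_enter() writes completions into a queue that may be mmapped by userspace, so the entry must be fully visible before the head index moves. A sketch of the publish step (a generic single-producer pattern; the driver's exact barrier placement may differ):

    #include <linux/types.h>
    #include <asm/barrier.h>

    struct cqe { u64 wr_id; u32 status; u32 qp_num; };
    struct cq_ring {                        /* illustrative layout */
            u32 head, size;
            struct cqe queue[];
    };

    static void cq_publish(struct cq_ring *cq, const struct cqe *e)
    {
            u32 head = cq->head;
            u32 next = head + 1 >= cq->size ? 0 : head + 1;

            cq->queue[head] = *e;
            smp_wmb();              /* entry contents before index update */
            cq->head = next;        /* consumer never sees a partial entry */
    }
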
/linux-4.1.27/drivers/infiniband/hw/mlx4/
H A D | Makefile | 3 mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o
H A D | qp.c | 44 #include <linux/mlx4/qp.h> 81 struct mlx4_ib_qp qp; member in struct:mlx4_ib_sqp 122 return container_of(mqp, struct mlx4_ib_sqp, qp); to_msqp() 125 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) is_tunnel_qp() argument 130 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && is_tunnel_qp() 131 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + is_tunnel_qp() 135 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) is_sqp() argument 142 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && is_sqp() 143 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); is_sqp() 149 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || is_sqp() 150 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { is_sqp() 160 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) is_qp0() argument 167 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && is_qp0() 168 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); is_qp0() 174 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { is_qp0() 183 static void *get_wqe(struct mlx4_ib_qp *qp, int offset) get_wqe() argument 185 return mlx4_buf_offset(&qp->buf, offset); get_wqe() 188 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) get_recv_wqe() argument 190 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); get_recv_wqe() 193 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) get_send_wqe() argument 195 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); get_send_wqe() 207 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) stamp_send_wqe() argument 217 if (qp->sq_max_wqes_per_wr > 1) { stamp_send_wqe() 218 s = roundup(size, 1U << qp->sq.wqe_shift); stamp_send_wqe() 220 ind = (i >> qp->sq.wqe_shift) + n; stamp_send_wqe() 221 stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : stamp_send_wqe() 223 buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); stamp_send_wqe() 224 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); stamp_send_wqe() 228 ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); stamp_send_wqe() 237 static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) post_nop_wqe() argument 244 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); post_nop_wqe() 247 if (qp->ibqp.qp_type == IB_QPT_UD) { post_nop_wqe() 251 av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); post_nop_wqe() 269 (n & qp->sq.wqe_cnt ? 
cpu_to_be32(1 << 31) : 0); post_nop_wqe() 271 stamp_send_wqe(qp, n + qp->sq_spare_wqes, size); post_nop_wqe() 275 static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind) pad_wraparound() argument 277 unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); pad_wraparound() 278 if (unlikely(s < qp->sq_max_wqes_per_wr)) { pad_wraparound() 279 post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); pad_wraparound() 285 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) mlx4_ib_qp_event() argument 288 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; mlx4_ib_qp_event() 291 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; mlx4_ib_qp_event() 295 event.element.qp = ibqp; mlx4_ib_qp_event() 323 "on QP %06x\n", type, qp->qpn); mlx4_ib_qp_event() 378 int is_user, int has_rq, struct mlx4_ib_qp *qp) set_rq_size() 389 qp->rq.wqe_cnt = qp->rq.max_gs = 0; set_rq_size() 395 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); set_rq_size() 396 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); set_rq_size() 397 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); set_rq_size() 402 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; set_rq_size() 403 cap->max_recv_sge = qp->rq.max_gs; set_rq_size() 405 cap->max_recv_wr = qp->rq.max_post = set_rq_size() 406 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); set_rq_size() 407 cap->max_recv_sge = min(qp->rq.max_gs, set_rq_size() 416 enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) set_kernel_sq_size() 423 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + set_kernel_sq_size() 438 send_wqe_overhead(type, qp->flags); set_kernel_sq_size() 475 qp->sq_signal_bits && BITS_PER_LONG == 64 && set_kernel_sq_size() 479 qp->sq.wqe_shift = ilog2(64); set_kernel_sq_size() 481 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); set_kernel_sq_size() 484 qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); set_kernel_sq_size() 490 qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; set_kernel_sq_size() 491 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * set_kernel_sq_size() 492 qp->sq_max_wqes_per_wr + set_kernel_sq_size() 493 qp->sq_spare_wqes); set_kernel_sq_size() 495 if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) set_kernel_sq_size() 498 if (qp->sq_max_wqes_per_wr <= 1) set_kernel_sq_size() 501 ++qp->sq.wqe_shift; set_kernel_sq_size() 504 qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, set_kernel_sq_size() 505 (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - set_kernel_sq_size() 506 send_wqe_overhead(type, qp->flags)) / set_kernel_sq_size() 509 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + set_kernel_sq_size() 510 (qp->sq.wqe_cnt << qp->sq.wqe_shift); set_kernel_sq_size() 511 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { set_kernel_sq_size() 512 qp->rq.offset = 0; set_kernel_sq_size() 513 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; set_kernel_sq_size() 515 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; set_kernel_sq_size() 516 qp->sq.offset = 0; set_kernel_sq_size() 519 cap->max_send_wr = qp->sq.max_post = set_kernel_sq_size() 520 (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; set_kernel_sq_size() 521 cap->max_send_sge = min(qp->sq.max_gs, set_kernel_sq_size() 531 struct mlx4_ib_qp *qp, set_user_sq_size() 541 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; set_user_sq_size() 542 qp->sq.wqe_shift = ucmd->log_sq_stride; set_user_sq_size() 544 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + 
set_user_sq_size() 545 (qp->sq.wqe_cnt << qp->sq.wqe_shift); set_user_sq_size() 550 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) alloc_proxy_bufs() argument 554 qp->sqp_proxy_rcv = alloc_proxy_bufs() 555 kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt, alloc_proxy_bufs() 557 if (!qp->sqp_proxy_rcv) alloc_proxy_bufs() 559 for (i = 0; i < qp->rq.wqe_cnt; i++) { alloc_proxy_bufs() 560 qp->sqp_proxy_rcv[i].addr = alloc_proxy_bufs() 563 if (!qp->sqp_proxy_rcv[i].addr) alloc_proxy_bufs() 565 qp->sqp_proxy_rcv[i].map = alloc_proxy_bufs() 566 ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, alloc_proxy_bufs() 569 if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { alloc_proxy_bufs() 570 kfree(qp->sqp_proxy_rcv[i].addr); alloc_proxy_bufs() 579 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, alloc_proxy_bufs() 582 kfree(qp->sqp_proxy_rcv[i].addr); alloc_proxy_bufs() 584 kfree(qp->sqp_proxy_rcv); alloc_proxy_bufs() 585 qp->sqp_proxy_rcv = NULL; alloc_proxy_bufs() 589 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) free_proxy_bufs() argument 593 for (i = 0; i < qp->rq.wqe_cnt; i++) { free_proxy_bufs() 594 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, free_proxy_bufs() 597 kfree(qp->sqp_proxy_rcv[i].addr); free_proxy_bufs() 599 kfree(qp->sqp_proxy_rcv); free_proxy_bufs() 628 struct mlx4_ib_qp *qp; create_qp_common() local 633 /* When tunneling special qps, we use a plain UD qp */ create_qp_common() 681 qp = &sqp->qp; create_qp_common() 682 qp->pri.vid = 0xFFFF; create_qp_common() 683 qp->alt.vid = 0xFFFF; create_qp_common() 685 qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp); create_qp_common() 686 if (!qp) create_qp_common() 688 qp->pri.vid = 0xFFFF; create_qp_common() 689 qp->alt.vid = 0xFFFF; create_qp_common() 692 qp = *caller_qp; create_qp_common() 694 qp->mlx4_ib_qp_type = qp_type; create_qp_common() 696 mutex_init(&qp->mutex); create_qp_common() 697 spin_lock_init(&qp->sq.lock); create_qp_common() 698 spin_lock_init(&qp->rq.lock); create_qp_common() 699 INIT_LIST_HEAD(&qp->gid_list); create_qp_common() 700 INIT_LIST_HEAD(&qp->steering_rules); create_qp_common() 702 qp->state = IB_QPS_RESET; create_qp_common() 704 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); create_qp_common() 706 err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp); create_qp_common() 718 qp->sq_no_prefetch = ucmd.sq_no_prefetch; create_qp_common() 720 err = set_user_sq_size(dev, qp, &ucmd); create_qp_common() 724 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, create_qp_common() 725 qp->buf_size, 0, 0); create_qp_common() 726 if (IS_ERR(qp->umem)) { create_qp_common() 727 err = PTR_ERR(qp->umem); create_qp_common() 731 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem), create_qp_common() 732 ilog2(qp->umem->page_size), &qp->mtt); create_qp_common() 736 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); create_qp_common() 742 ucmd.db_addr, &qp->db); create_qp_common() 747 qp->sq_no_prefetch = 0; create_qp_common() 750 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; create_qp_common() 753 qp->flags |= MLX4_IB_QP_LSO; create_qp_common() 758 qp->flags |= MLX4_IB_QP_NETIF; create_qp_common() 763 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); create_qp_common() 768 err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp); create_qp_common() 772 *qp->db.db = 0; create_qp_common() 775 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) { create_qp_common() 780 err = 
mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, create_qp_common() 781 &qp->mtt); create_qp_common() 785 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp); create_qp_common() 789 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp); create_qp_common() 790 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp); create_qp_common() 791 if (!qp->sq.wrid || !qp->rq.wrid) { create_qp_common() 798 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | create_qp_common() 800 if (alloc_proxy_bufs(pd->device, qp)) { create_qp_common() 816 if (qp->flags & MLX4_IB_QP_NETIF) create_qp_common() 825 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); create_qp_common() 830 qp->mqp.qpn |= (1 << 23); create_qp_common() 837 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); create_qp_common() 839 qp->mqp.event = mlx4_ib_qp_event; create_qp_common() 841 *caller_qp = qp; create_qp_common() 849 list_add_tail(&qp->qps_list, &dev->qp_list); create_qp_common() 854 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); create_qp_common() 856 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); create_qp_common() 864 if (qp->flags & MLX4_IB_QP_NETIF) create_qp_common() 870 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) create_qp_common() 871 free_proxy_bufs(pd->device, qp); create_qp_common() 875 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); create_qp_common() 877 kfree(qp->sq.wrid); create_qp_common() 878 kfree(qp->rq.wrid); create_qp_common() 882 mlx4_mtt_cleanup(dev->dev, &qp->mtt); create_qp_common() 886 ib_umem_release(qp->umem); create_qp_common() 888 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); create_qp_common() 892 mlx4_db_free(dev->dev, &qp->db); create_qp_common() 896 kfree(qp); create_qp_common() 944 static void del_gid_entries(struct mlx4_ib_qp *qp) del_gid_entries() argument 948 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { del_gid_entries() 954 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) get_pd() argument 956 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) get_pd() 957 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); get_pd() 959 return to_mpd(qp->ibqp.pd); get_pd() 962 static void get_cqs(struct mlx4_ib_qp *qp, get_cqs() argument 965 switch (qp->ibqp.qp_type) { get_cqs() 967 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); get_cqs() 971 *send_cq = to_mcq(qp->ibqp.send_cq); get_cqs() 975 *send_cq = to_mcq(qp->ibqp.send_cq); get_cqs() 976 *recv_cq = to_mcq(qp->ibqp.recv_cq); get_cqs() 981 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, destroy_qp_common() argument 987 if (qp->state != IB_QPS_RESET) { destroy_qp_common() 988 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), destroy_qp_common() 989 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) destroy_qp_common() 991 qp->mqp.qpn); destroy_qp_common() 992 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { destroy_qp_common() 993 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); destroy_qp_common() 994 qp->pri.smac = 0; destroy_qp_common() 995 qp->pri.smac_port = 0; destroy_qp_common() 997 if (qp->alt.smac) { destroy_qp_common() 998 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); destroy_qp_common() 999 qp->alt.smac = 0; destroy_qp_common() 1001 if (qp->pri.vid < 0x1000) { destroy_qp_common() 1002 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); destroy_qp_common() 1003 qp->pri.vid = 0xFFFF; destroy_qp_common() 1004 qp->pri.candidate_vid = 0xFFFF; destroy_qp_common() 1005 qp->pri.update_vid = 0; 
destroy_qp_common() 1007 if (qp->alt.vid < 0x1000) { destroy_qp_common() 1008 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); destroy_qp_common() 1009 qp->alt.vid = 0xFFFF; destroy_qp_common() 1010 qp->alt.candidate_vid = 0xFFFF; destroy_qp_common() 1011 qp->alt.update_vid = 0; destroy_qp_common() 1015 get_cqs(qp, &send_cq, &recv_cq); destroy_qp_common() 1021 list_del(&qp->qps_list); destroy_qp_common() 1022 list_del(&qp->cq_send_list); destroy_qp_common() 1023 list_del(&qp->cq_recv_list); destroy_qp_common() 1025 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, destroy_qp_common() 1026 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); destroy_qp_common() 1028 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); destroy_qp_common() 1031 mlx4_qp_remove(dev->dev, &qp->mqp); destroy_qp_common() 1036 mlx4_qp_free(dev->dev, &qp->mqp); destroy_qp_common() 1038 if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { destroy_qp_common() 1039 if (qp->flags & MLX4_IB_QP_NETIF) destroy_qp_common() 1040 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); destroy_qp_common() 1042 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); destroy_qp_common() 1045 mlx4_mtt_cleanup(dev->dev, &qp->mtt); destroy_qp_common() 1048 if (qp->rq.wqe_cnt) destroy_qp_common() 1049 mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), destroy_qp_common() 1050 &qp->db); destroy_qp_common() 1051 ib_umem_release(qp->umem); destroy_qp_common() 1053 kfree(qp->sq.wrid); destroy_qp_common() 1054 kfree(qp->rq.wrid); destroy_qp_common() 1055 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | destroy_qp_common() 1057 free_proxy_bufs(&dev->ib_dev, qp); destroy_qp_common() 1058 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); destroy_qp_common() 1059 if (qp->rq.wqe_cnt) destroy_qp_common() 1060 mlx4_db_free(dev->dev, &qp->db); destroy_qp_common() 1063 del_gid_entries(qp); destroy_qp_common() 1087 struct mlx4_ib_qp *qp = NULL; mlx4_ib_create_qp() local 1133 qp = kzalloc(sizeof *qp, gfp); mlx4_ib_create_qp() 1134 if (!qp) mlx4_ib_create_qp() 1136 qp->pri.vid = 0xFFFF; mlx4_ib_create_qp() 1137 qp->alt.vid = 0xFFFF; mlx4_ib_create_qp() 1142 udata, 0, &qp, gfp); mlx4_ib_create_qp() 1146 qp->ibqp.qp_num = qp->mqp.qpn; mlx4_ib_create_qp() 1147 qp->xrcdn = xrcdn; mlx4_ib_create_qp() 1160 &qp, gfp); mlx4_ib_create_qp() 1164 qp->port = init_attr->port_num; mlx4_ib_create_qp() 1165 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; mlx4_ib_create_qp() 1174 return &qp->ibqp; mlx4_ib_create_qp() 1177 int mlx4_ib_destroy_qp(struct ib_qp *qp) mlx4_ib_destroy_qp() argument 1179 struct mlx4_ib_dev *dev = to_mdev(qp->device); mlx4_ib_destroy_qp() 1180 struct mlx4_ib_qp *mqp = to_mqp(qp); mlx4_ib_destroy_qp() 1227 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, to_mlx4_access_flags() argument 1237 dest_rd_atomic = qp->resp_depth; to_mlx4_access_flags() 1242 access_flags = qp->atomic_rd_en; to_mlx4_access_flags() 1335 /* no current vlan tag in qp */ _mlx4_set_path() 1348 /* have current vlan tag. unregister it at modify-qp success */ _mlx4_set_path() 1387 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, mlx4_set_path() argument 1392 return _mlx4_set_path(dev, &qp->ah_attr, mlx4_set_path() 1393 mlx4_mac_to_u64((u8 *)qp->smac), mlx4_set_path() 1394 (qp_attr_mask & IB_QP_VID) ? 
qp->vlan_id : 0xffff, mlx4_set_path() 1399 const struct ib_qp_attr *qp, mlx4_set_alt_path() 1404 return _mlx4_set_path(dev, &qp->alt_ah_attr, mlx4_set_alt_path() 1405 mlx4_mac_to_u64((u8 *)qp->alt_smac), mlx4_set_alt_path() 1407 qp->alt_vlan_id : 0xffff, mlx4_set_alt_path() 1411 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) update_mcg_macs() argument 1415 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { update_mcg_macs() 1416 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { update_mcg_macs() 1418 ge->port = qp->port; update_mcg_macs() 1423 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac, handle_eth_ud_smac_index() argument 1429 u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); handle_eth_ud_smac_index() 1431 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); handle_eth_ud_smac_index() 1432 if (!qp->pri.smac && !qp->pri.smac_port) { handle_eth_ud_smac_index() 1433 smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); handle_eth_ud_smac_index() 1435 qp->pri.candidate_smac_index = smac_index; handle_eth_ud_smac_index() 1436 qp->pri.candidate_smac = u64_mac; handle_eth_ud_smac_index() 1437 qp->pri.candidate_smac_port = qp->port; handle_eth_ud_smac_index() 1451 struct mlx4_ib_qp *qp = to_mqp(ibqp); __mlx4_ib_modify_qp() local 1462 rdma_port_get_link_layer(&dev->ib_dev, qp->port) == __mlx4_ib_modify_qp() 1471 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); __mlx4_ib_modify_qp() 1495 if (qp->flags & MLX4_IB_QP_LSO) __mlx4_ib_modify_qp() 1510 if (qp->rq.wqe_cnt) __mlx4_ib_modify_qp() 1511 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; __mlx4_ib_modify_qp() 1512 context->rq_size_stride |= qp->rq.wqe_shift - 4; __mlx4_ib_modify_qp() 1514 if (qp->sq.wqe_cnt) __mlx4_ib_modify_qp() 1515 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; __mlx4_ib_modify_qp() 1516 context->sq_size_stride |= qp->sq.wqe_shift - 4; __mlx4_ib_modify_qp() 1519 context->sq_size_stride |= !!qp->sq_no_prefetch << 7; __mlx4_ib_modify_qp() 1520 context->xrcd = cpu_to_be32((u32) qp->xrcdn); __mlx4_ib_modify_qp() 1525 if (qp->ibqp.uobject) __mlx4_ib_modify_qp() 1542 if (dev->counters[qp->port - 1] != -1) { __mlx4_ib_modify_qp() 1544 dev->counters[qp->port - 1]; __mlx4_ib_modify_qp() 1549 if (qp->flags & MLX4_IB_QP_NETIF) { __mlx4_ib_modify_qp() 1550 mlx4_ib_steer_qp_reg(dev, qp, 1); __mlx4_ib_modify_qp() 1556 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) __mlx4_ib_modify_qp() 1563 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, __mlx4_ib_modify_qp() 1565 attr->port_num : qp->port)) __mlx4_ib_modify_qp() 1586 if (mlx4_set_alt_path(dev, attr, attr_mask, qp, __mlx4_ib_modify_qp() 1596 pd = get_pd(qp); __mlx4_ib_modify_qp() 1597 get_cqs(qp, &send_cq, &recv_cq); __mlx4_ib_modify_qp() 1604 if (!qp->ibqp.uobject) __mlx4_ib_modify_qp() 1635 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); __mlx4_ib_modify_qp() 1649 /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */ __mlx4_ib_modify_qp() 1651 if (qp->mlx4_ib_qp_type & __mlx4_ib_modify_qp() 1656 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && __mlx4_ib_modify_qp() 1673 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) __mlx4_ib_modify_qp() 1674 context->db_rec_addr = cpu_to_be64(qp->db.dma); __mlx4_ib_modify_qp() 1681 context->pri_path.sched_queue = (qp->port - 1) << 6; __mlx4_ib_modify_qp() 1682 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || __mlx4_ib_modify_qp() 1683 
qp->mlx4_ib_qp_type & __mlx4_ib_modify_qp() 1686 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) __mlx4_ib_modify_qp() 1689 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) __mlx4_ib_modify_qp() 1693 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == __mlx4_ib_modify_qp() 1695 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || __mlx4_ib_modify_qp() 1696 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) __mlx4_ib_modify_qp() 1699 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || __mlx4_ib_modify_qp() 1700 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || __mlx4_ib_modify_qp() 1701 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { __mlx4_ib_modify_qp() 1702 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); __mlx4_ib_modify_qp() 1707 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) __mlx4_ib_modify_qp() 1708 dev->qp1_proxy[qp->port - 1] = qp; __mlx4_ib_modify_qp() 1713 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { __mlx4_ib_modify_qp() 1725 &dev->ib_dev, qp->port) == __mlx4_ib_modify_qp() 1753 for (i = 0; i < qp->sq.wqe_cnt; ++i) { __mlx4_ib_modify_qp() 1754 ctrl = get_send_wqe(qp, i); __mlx4_ib_modify_qp() 1756 if (qp->sq_max_wqes_per_wr == 1) __mlx4_ib_modify_qp() 1757 ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); __mlx4_ib_modify_qp() 1759 stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); __mlx4_ib_modify_qp() 1763 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), __mlx4_ib_modify_qp() 1765 sqd_event, &qp->mqp); __mlx4_ib_modify_qp() 1769 qp->state = new_state; __mlx4_ib_modify_qp() 1772 qp->atomic_rd_en = attr->qp_access_flags; __mlx4_ib_modify_qp() 1774 qp->resp_depth = attr->max_dest_rd_atomic; __mlx4_ib_modify_qp() 1776 qp->port = attr->port_num; __mlx4_ib_modify_qp() 1777 update_mcg_macs(dev, qp); __mlx4_ib_modify_qp() 1780 qp->alt_port = attr->alt_port_num; __mlx4_ib_modify_qp() 1782 if (is_sqp(dev, qp)) __mlx4_ib_modify_qp() 1783 store_sqp_attrs(to_msqp(qp), attr, attr_mask); __mlx4_ib_modify_qp() 1789 if (is_qp0(dev, qp)) { __mlx4_ib_modify_qp() 1791 if (mlx4_INIT_PORT(dev->dev, qp->port)) __mlx4_ib_modify_qp() 1793 qp->port); __mlx4_ib_modify_qp() 1797 mlx4_CLOSE_PORT(dev->dev, qp->port); __mlx4_ib_modify_qp() 1806 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, __mlx4_ib_modify_qp() 1809 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); __mlx4_ib_modify_qp() 1811 qp->rq.head = 0; __mlx4_ib_modify_qp() 1812 qp->rq.tail = 0; __mlx4_ib_modify_qp() 1813 qp->sq.head = 0; __mlx4_ib_modify_qp() 1814 qp->sq.tail = 0; __mlx4_ib_modify_qp() 1815 qp->sq_next_wqe = 0; __mlx4_ib_modify_qp() 1816 if (qp->rq.wqe_cnt) __mlx4_ib_modify_qp() 1817 *qp->db.db = 0; __mlx4_ib_modify_qp() 1819 if (qp->flags & MLX4_IB_QP_NETIF) __mlx4_ib_modify_qp() 1820 mlx4_ib_steer_qp_reg(dev, qp, 0); __mlx4_ib_modify_qp() 1822 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { __mlx4_ib_modify_qp() 1823 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); __mlx4_ib_modify_qp() 1824 qp->pri.smac = 0; __mlx4_ib_modify_qp() 1825 qp->pri.smac_port = 0; __mlx4_ib_modify_qp() 1827 if (qp->alt.smac) { __mlx4_ib_modify_qp() 1828 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); __mlx4_ib_modify_qp() 1829 qp->alt.smac = 0; __mlx4_ib_modify_qp() 1831 if (qp->pri.vid < 0x1000) { __mlx4_ib_modify_qp() 1832 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); __mlx4_ib_modify_qp() 1833 qp->pri.vid = 0xFFFF; __mlx4_ib_modify_qp() 1834 qp->pri.candidate_vid = 0xFFFF; __mlx4_ib_modify_qp() 1835 qp->pri.update_vid = 0; __mlx4_ib_modify_qp() 1838 if (qp->alt.vid < 0x1000) { 
__mlx4_ib_modify_qp() 1839 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); __mlx4_ib_modify_qp() 1840 qp->alt.vid = 0xFFFF; __mlx4_ib_modify_qp() 1841 qp->alt.candidate_vid = 0xFFFF; __mlx4_ib_modify_qp() 1842 qp->alt.update_vid = 0; __mlx4_ib_modify_qp() 1847 mlx4_ib_steer_qp_reg(dev, qp, 0); __mlx4_ib_modify_qp() 1849 if (qp->pri.candidate_smac || __mlx4_ib_modify_qp() 1850 (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { __mlx4_ib_modify_qp() 1852 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); __mlx4_ib_modify_qp() 1854 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) __mlx4_ib_modify_qp() 1855 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); __mlx4_ib_modify_qp() 1856 qp->pri.smac = qp->pri.candidate_smac; __mlx4_ib_modify_qp() 1857 qp->pri.smac_index = qp->pri.candidate_smac_index; __mlx4_ib_modify_qp() 1858 qp->pri.smac_port = qp->pri.candidate_smac_port; __mlx4_ib_modify_qp() 1860 qp->pri.candidate_smac = 0; __mlx4_ib_modify_qp() 1861 qp->pri.candidate_smac_index = 0; __mlx4_ib_modify_qp() 1862 qp->pri.candidate_smac_port = 0; __mlx4_ib_modify_qp() 1864 if (qp->alt.candidate_smac) { __mlx4_ib_modify_qp() 1866 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); __mlx4_ib_modify_qp() 1868 if (qp->alt.smac) __mlx4_ib_modify_qp() 1869 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); __mlx4_ib_modify_qp() 1870 qp->alt.smac = qp->alt.candidate_smac; __mlx4_ib_modify_qp() 1871 qp->alt.smac_index = qp->alt.candidate_smac_index; __mlx4_ib_modify_qp() 1872 qp->alt.smac_port = qp->alt.candidate_smac_port; __mlx4_ib_modify_qp() 1874 qp->alt.candidate_smac = 0; __mlx4_ib_modify_qp() 1875 qp->alt.candidate_smac_index = 0; __mlx4_ib_modify_qp() 1876 qp->alt.candidate_smac_port = 0; __mlx4_ib_modify_qp() 1879 if (qp->pri.update_vid) { __mlx4_ib_modify_qp() 1881 if (qp->pri.candidate_vid < 0x1000) __mlx4_ib_modify_qp() 1882 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, __mlx4_ib_modify_qp() 1883 qp->pri.candidate_vid); __mlx4_ib_modify_qp() 1885 if (qp->pri.vid < 0x1000) __mlx4_ib_modify_qp() 1886 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, __mlx4_ib_modify_qp() 1887 qp->pri.vid); __mlx4_ib_modify_qp() 1888 qp->pri.vid = qp->pri.candidate_vid; __mlx4_ib_modify_qp() 1889 qp->pri.vlan_port = qp->pri.candidate_vlan_port; __mlx4_ib_modify_qp() 1890 qp->pri.vlan_index = qp->pri.candidate_vlan_index; __mlx4_ib_modify_qp() 1892 qp->pri.candidate_vid = 0xFFFF; __mlx4_ib_modify_qp() 1893 qp->pri.update_vid = 0; __mlx4_ib_modify_qp() 1896 if (qp->alt.update_vid) { __mlx4_ib_modify_qp() 1898 if (qp->alt.candidate_vid < 0x1000) __mlx4_ib_modify_qp() 1899 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, __mlx4_ib_modify_qp() 1900 qp->alt.candidate_vid); __mlx4_ib_modify_qp() 1902 if (qp->alt.vid < 0x1000) __mlx4_ib_modify_qp() 1903 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, __mlx4_ib_modify_qp() 1904 qp->alt.vid); __mlx4_ib_modify_qp() 1905 qp->alt.vid = qp->alt.candidate_vid; __mlx4_ib_modify_qp() 1906 qp->alt.vlan_port = qp->alt.candidate_vlan_port; __mlx4_ib_modify_qp() 1907 qp->alt.vlan_index = qp->alt.candidate_vlan_index; __mlx4_ib_modify_qp() 1909 qp->alt.candidate_vid = 0xFFFF; __mlx4_ib_modify_qp() 1910 qp->alt.update_vid = 0; __mlx4_ib_modify_qp() 1920 struct mlx4_ib_qp *qp = to_mqp(ibqp); mlx4_ib_modify_qp() local 1924 mutex_lock(&qp->mutex); mlx4_ib_modify_qp() 1926 cur_state = attr_mask & IB_QP_CUR_STATE ? 
attr->cur_qp_state : qp->state; mlx4_ib_modify_qp() 1932 int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; mlx4_ib_modify_qp() 1977 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; mlx4_ib_modify_qp() 2016 mutex_unlock(&qp->mutex); mlx4_ib_modify_qp() 2037 struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); build_sriov_qp0_header() 2059 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) build_sriov_qp0_header() 2064 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { build_sriov_qp0_header() 2081 ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); build_sriov_qp0_header() 2083 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) build_sriov_qp0_header() 2087 cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); build_sriov_qp0_header() 2091 if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) build_sriov_qp0_header() 2094 if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) build_sriov_qp0_header() 2098 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); build_sriov_qp0_header() 2159 struct ib_device *ib_dev = sqp->qp.ibqp.device; build_mlx_header() 2180 is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; build_mlx_header() 2228 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. build_mlx_header() 2231 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. build_mlx_header() 2246 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | build_mlx_header() 2283 u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]); build_mlx_header() 2302 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; build_mlx_header() 2307 if (!sqp->qp.ibqp.qp_num) build_mlx_header() 2308 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); build_mlx_header() 2310 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); build_mlx_header() 2316 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); build_mlx_header() 2598 struct mlx4_ib_qp *qp, unsigned *lso_seg_len, build_lso_seg() 2606 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && build_lso_seg() 2607 wr->num_sge > qp->sq.max_gs - (halign >> 4))) build_lso_seg() 2642 struct mlx4_ib_qp *qp = to_mqp(ibqp); mlx4_ib_post_send() local 2660 spin_lock_irqsave(&qp->sq.lock, flags); mlx4_ib_post_send() 2668 ind = qp->sq_next_wqe; mlx4_ib_post_send() 2674 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mlx4_ib_post_send() 2680 if (unlikely(wr->num_sge > qp->sq.max_gs)) { mlx4_ib_post_send() 2686 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); mlx4_ib_post_send() 2687 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; mlx4_ib_post_send() 2697 qp->sq_signal_bits; mlx4_ib_post_send() 2704 switch (qp->mlx4_ib_qp_type) { mlx4_ib_post_send() 2775 err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); mlx4_ib_post_send() 2785 /* this is a UD qp used in MAD responses to slaves. */ mlx4_ib_post_send() 2798 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh); mlx4_ib_post_send() 2810 err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); mlx4_ib_post_send() 2827 /* If we are tunneling special qps, this is a UD qp. 
mlx4_ib_post_send() 2829 * the tunnel qp, and then add a header with address mlx4_ib_post_send() 2832 qp->mlx4_ib_qp_type); mlx4_ib_post_send() 2842 err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); mlx4_ib_post_send() 2867 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || mlx4_ib_post_send() 2868 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || mlx4_ib_post_send() 2869 qp->mlx4_ib_qp_type & mlx4_ib_post_send() 2903 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; mlx4_ib_post_send() 2905 stamp = ind + qp->sq_spare_wqes; mlx4_ib_post_send() 2906 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); mlx4_ib_post_send() 2918 stamp_send_wqe(qp, stamp, size * 16); mlx4_ib_post_send() 2919 ind = pad_wraparound(qp, ind); mlx4_ib_post_send() 2925 qp->sq.head += nreq; mlx4_ib_post_send() 2933 writel(qp->doorbell_qpn, mlx4_ib_post_send() 2942 stamp_send_wqe(qp, stamp, size * 16); mlx4_ib_post_send() 2944 ind = pad_wraparound(qp, ind); mlx4_ib_post_send() 2945 qp->sq_next_wqe = ind; mlx4_ib_post_send() 2948 spin_unlock_irqrestore(&qp->sq.lock, flags); mlx4_ib_post_send() 2956 struct mlx4_ib_qp *qp = to_mqp(ibqp); mlx4_ib_post_recv() local 2966 max_gs = qp->rq.max_gs; mlx4_ib_post_recv() 2967 spin_lock_irqsave(&qp->rq.lock, flags); mlx4_ib_post_recv() 2976 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); mlx4_ib_post_recv() 2979 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mlx4_ib_post_recv() 2985 if (unlikely(wr->num_sge > qp->rq.max_gs)) { mlx4_ib_post_recv() 2991 scat = get_recv_wqe(qp, ind); mlx4_ib_post_recv() 2993 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | mlx4_ib_post_recv() 2996 qp->sqp_proxy_rcv[ind].map, mlx4_ib_post_recv() 3003 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); mlx4_ib_post_recv() 3017 qp->rq.wrid[ind] = wr->wr_id; mlx4_ib_post_recv() 3019 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); mlx4_ib_post_recv() 3024 qp->rq.head += nreq; mlx4_ib_post_recv() 3032 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); mlx4_ib_post_recv() 3035 spin_unlock_irqrestore(&qp->rq.lock, flags); mlx4_ib_post_recv() 3119 struct mlx4_ib_qp *qp = to_mqp(ibqp); mlx4_ib_query_qp() local 3124 mutex_lock(&qp->mutex); mlx4_ib_query_qp() 3126 if (qp->state == IB_QPS_RESET) { mlx4_ib_query_qp() 3131 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); mlx4_ib_query_qp() 3139 qp->state = to_ib_qp_state(mlx4_state); mlx4_ib_query_qp() 3140 qp_attr->qp_state = qp->state; mlx4_ib_query_qp() 3151 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { mlx4_ib_query_qp() 3160 qp_attr->port_num = qp->port; mlx4_ib_query_qp() 3164 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ mlx4_ib_query_qp() 3180 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; mlx4_ib_query_qp() 3181 qp_attr->cap.max_recv_sge = qp->rq.max_gs; mlx4_ib_query_qp() 3184 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; mlx4_ib_query_qp() 3185 qp_attr->cap.max_send_sge = qp->sq.max_gs; mlx4_ib_query_qp() 3200 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) mlx4_ib_query_qp() 3203 if (qp->flags & MLX4_IB_QP_LSO) mlx4_ib_query_qp() 3206 if (qp->flags & MLX4_IB_QP_NETIF) mlx4_ib_query_qp() 3210 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? 
mlx4_ib_query_qp() 3214 mutex_unlock(&qp->mutex); mlx4_ib_query_qp() 377 set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, int is_user, int has_rq, struct mlx4_ib_qp *qp) set_rq_size() argument 415 set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) set_kernel_sq_size() argument 530 set_user_sq_size(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, struct mlx4_ib_create_qp *ucmd) set_user_sq_size() argument 1398 mlx4_set_alt_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port) mlx4_set_alt_path() argument 2597 build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) build_lso_seg() argument
|
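The mlx4_ib_post_recv() and doorbell fragments above all depend on the ring sizes being powers of two: the producer keeps a free-running head counter, derives the slot with & (wqe_cnt - 1), and publishes only the low 16 bits of the head to the doorbell record. A minimal user-space model of that arithmetic, assuming nothing beyond standard C (struct ring and ring_post are hypothetical stand-ins; the real driver also issues a write barrier and writes the doorbell through DMA-coherent memory):

#include <stdint.h>
#include <stdio.h>

#define WQE_CNT 8                 /* must be a power of two, as in mlx4 */

struct ring {
    uint64_t wrid[WQE_CNT];       /* models qp->rq.wrid[] */
    uint32_t head;                /* free-running producer counter */
    volatile uint32_t db;         /* models *qp->db.db */
};

/* Post one work request: the slot is the head masked to the ring size. */
static void ring_post(struct ring *r, uint64_t wr_id)
{
    unsigned ind = r->head & (WQE_CNT - 1);
    r->wrid[ind] = wr_id;
    r->head++;
    /* Hardware reads only the low 16 bits, like rq.head & 0xffff. */
    r->db = r->head & 0xffff;
}

int main(void)
{
    struct ring r = {0};
    for (uint64_t i = 0; i < 20; i++)   /* wraps past WQE_CNT twice */
        ring_post(&r, i);
    printf("head=%u db=%u slot0=%llu\n", r.head, r.db,
           (unsigned long long)r.wrid[0]);
    return 0;
}

Because the counter is free-running, head - tail gives the fill level directly even across wraparound, which is what the mlx4_wq_overflow() checks in the fragments above exploit.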
H A D | mad.c | 164 ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num); mlx4_MAD_IFC() 207 new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, update_sm_ah() 485 /* check if proxy qp created */ mlx4_ib_send_to_slave() 490 tun_qp = &tun_ctx->qp[0]; mlx4_ib_send_to_slave() 492 tun_qp = &tun_ctx->qp[1]; mlx4_ib_send_to_slave() 511 src_qp = tun_qp->qp; mlx4_ib_send_to_slave() 646 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); mlx4_ib_demux_mad() 648 pr_debug("failed sending to slave %d via tunnel qp (%d)\n", mlx4_ib_demux_mad() 717 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad); mlx4_ib_demux_mad() 719 pr_debug("failed sending to slave %d via tunnel qp (%d)\n", mlx4_ib_demux_mad() 732 if (in_wc && in_wc->qp->qp_num) { ib_process_mad() 737 in_wc->qp->qp_num, ib_process_mad() 1125 size = (tun_qp->qp->qp_type == IB_QPT_UD) ? mlx4_ib_post_pv_qp_buf() 1139 return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr); mlx4_ib_post_pv_qp_buf() 1187 /* check if proxy qp created */ mlx4_ib_send_to_wire() 1193 sqp = &sqp_ctx->qp[0]; mlx4_ib_send_to_wire() 1197 sqp = &sqp_ctx->qp[1]; mlx4_ib_send_to_wire() 1201 send_qp = sqp->qp; mlx4_ib_send_to_wire() 1284 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)]; mlx4_ib_multiplex_mad() 1401 tun_qp = &ctx->qp[qp_type]; mlx4_ib_alloc_pv_bufs() 1495 tun_qp = &ctx->qp[qp_type]; mlx4_ib_free_pv_qp_bufs() 1532 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; mlx4_ib_tunnel_comp_worker() 1598 tun_qp = &ctx->qp[qp_type]; create_pv_sqp() 1624 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr); create_pv_sqp() 1625 if (IS_ERR(tun_qp->qp)) { create_pv_sqp() 1626 ret = PTR_ERR(tun_qp->qp); create_pv_sqp() 1627 tun_qp->qp = NULL; create_pv_sqp() 1645 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT); create_pv_sqp() 1647 pr_err("Couldn't change %s qp state to INIT (%d)\n", create_pv_sqp() 1652 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE); create_pv_sqp() 1654 pr_err("Couldn't change %s qp state to RTR (%d)\n", create_pv_sqp() 1660 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN); create_pv_sqp() 1662 pr_err("Couldn't change %s qp state to RTS (%d)\n", create_pv_sqp() 1678 ib_destroy_qp(tun_qp->qp); create_pv_sqp() 1679 tun_qp->qp = NULL; create_pv_sqp() 1698 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; mlx4_ib_sqp_comp_worker() 1859 ib_destroy_qp(ctx->qp[1].qp); create_pv_resources() 1860 ctx->qp[1].qp = NULL; create_pv_resources() 1865 ib_destroy_qp(ctx->qp[0].qp); create_pv_resources() 1866 ctx->qp[0].qp = NULL; create_pv_resources() 1901 ib_destroy_qp(ctx->qp[0].qp); destroy_pv_resources() 1902 ctx->qp[0].qp = NULL; destroy_pv_resources() 1905 ib_destroy_qp(ctx->qp[1].qp); destroy_pv_resources() 1906 ctx->qp[1].qp = NULL; destroy_pv_resources() 1929 /* destroy the tunnel qp resources */ mlx4_ib_tunnels_update() 1935 /* create the tunnel qp resources */ mlx4_ib_tunnels_update() 2035 ib_destroy_qp(sqp_ctx->qp[0].qp); mlx4_ib_free_sqp_ctx() 2036 sqp_ctx->qp[0].qp = NULL; mlx4_ib_free_sqp_ctx() 2039 ib_destroy_qp(sqp_ctx->qp[1].qp); mlx4_ib_free_sqp_ctx() 2040 sqp_ctx->qp[1].qp = NULL; mlx4_ib_free_sqp_ctx()
|
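create_pv_sqp() above brings each tunnel/proxy QP up with three ib_modify_qp() calls: RESET to INIT (state, pkey index, port, qkey), INIT to RTR (state only, since UD needs no path information), and RTR to RTS (state plus SQ PSN). The user-space analogue with libibverbs looks like the sketch below; ud_qp_to_rts is a hypothetical helper and the error handling is deliberately minimal (a real caller would destroy the QP on failure, as the kernel code does):

#include <infiniband/verbs.h>
#include <stdint.h>
#include <stdio.h>

/* Drive a UD QP RESET -> INIT -> RTR -> RTS, mirroring the three
 * ib_modify_qp() calls in create_pv_sqp(). */
static int ud_qp_to_rts(struct ibv_qp *qp, uint8_t port, uint32_t qkey)
{
    struct ibv_qp_attr attr = {0};

    attr.qp_state   = IBV_QPS_INIT;
    attr.pkey_index = 0;
    attr.port_num   = port;
    attr.qkey       = qkey;
    if (ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_PKEY_INDEX |
                                 IBV_QP_PORT | IBV_QP_QKEY)) {
        fprintf(stderr, "to INIT failed\n");
        return -1;
    }

    attr.qp_state = IBV_QPS_RTR;      /* UD: no path info needed here */
    if (ibv_modify_qp(qp, &attr, IBV_QP_STATE)) {
        fprintf(stderr, "to RTR failed\n");
        return -1;
    }

    attr.qp_state = IBV_QPS_RTS;
    attr.sq_psn   = 0;
    if (ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_SQ_PSN)) {
        fprintf(stderr, "to RTS failed\n");
        return -1;
    }
    return 0;
}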
H A D | cq.c | 35 #include <linux/mlx4/qp.h> 570 static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, use_tunnel_data() argument 575 ib_dma_sync_single_for_cpu(qp->ibqp.device, use_tunnel_data() 576 qp->sqp_proxy_rcv[tail].map, use_tunnel_data() 579 hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr); use_tunnel_data() 598 static void mlx4_ib_qp_sw_comp(struct mlx4_ib_qp *qp, int num_entries, mlx4_ib_qp_sw_comp() argument 605 wq = is_send ? &qp->sq : &qp->rq; mlx4_ib_qp_sw_comp() 617 wc->qp = &qp->ibqp; mlx4_ib_qp_sw_comp() 625 struct mlx4_ib_qp *qp; mlx4_ib_poll_sw_comp() local 631 list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) { mlx4_ib_poll_sw_comp() 632 mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1); mlx4_ib_poll_sw_comp() 637 list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) { mlx4_ib_poll_sw_comp() 638 mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 0); mlx4_ib_poll_sw_comp() 723 wc->qp = &(*cur_qp)->ibqp; mlx4_ib_poll_one() 725 if (wc->qp->qp_type == IB_QPT_XRC_TGT) { mlx4_ib_poll_one() 843 is_eth = (rdma_port_get_link_layer(wc->qp->device, mlx4_ib_poll_one()
|
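mlx4_ib_poll_one() above resolves each CQE back to its owning QP and fills wc->qp so the consumer can demultiplex completions. The same contract surfaces in user space through ibv_poll_cq(), whose ibv_wc carries qp_num; a hedged sketch of a drain loop follows (drain_cq and the batch size of 16 are arbitrary illustrative choices, not driver values):

#include <infiniband/verbs.h>
#include <stdio.h>

/* Drain up to 'budget' completions; each ibv_wc carries qp_num so the
 * consumer can route it back to the owning queue pair. */
static int drain_cq(struct ibv_cq *cq, int budget)
{
    struct ibv_wc wc[16];
    int total = 0;

    while (total < budget) {
        int n = ibv_poll_cq(cq, 16, wc);
        if (n < 0)
            return -1;              /* device error */
        if (n == 0)
            break;                  /* ring empty */
        for (int i = 0; i < n; i++)
            if (wc[i].status != IBV_WC_SUCCESS)
                fprintf(stderr, "wr %llu on qp 0x%x failed: %d\n",
                        (unsigned long long)wc[i].wr_id,
                        (unsigned)wc[i].qp_num, (int)wc[i].status);
        total += n;
    }
    return total;
}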
H A D | main.c | 51 #include <linux/mlx4/qp.h> 207 props->max_qp = dev->dev->quotas.qp; mlx4_ib_query_device() 930 static int __mlx4_ib_default_rules_match(struct ib_qp *qp, __mlx4_ib_default_rules_match() argument 936 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port); __mlx4_ib_default_rules_match() 986 struct ib_qp *qp, __mlx4_ib_create_default_rules() 1022 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, __mlx4_ib_create_flow() argument 1030 struct mlx4_ib_dev *mdev = to_mdev(qp->device); __mlx4_ib_create_flow() 1064 ctrl->qpn = cpu_to_be32(qp->qp_num); __mlx4_ib_create_flow() 1069 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr); __mlx4_ib_create_flow() 1072 mdev, qp, default_table + default_flow, __mlx4_ib_create_flow() 1081 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow, __mlx4_ib_create_flow() 1117 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr, mlx4_ib_tunnel_steer_add() argument 1122 struct mlx4_dev *dev = to_mdev(qp->device)->dev; mlx4_ib_tunnel_steer_add() 1135 err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac, mlx4_ib_tunnel_steer_add() 1136 flow_attr->port, qp->qp_num, mlx4_ib_tunnel_steer_add() 1142 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, mlx4_ib_create_flow() argument 1149 struct mlx4_dev *dev = (to_mdev(qp->device))->dev; mlx4_ib_create_flow() 1184 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i], mlx4_ib_create_flow() 1194 err = __mlx4_ib_create_flow(qp, flow_attr, mlx4_ib_create_flow() 1206 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, mlx4_ib_create_flow() 1213 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, mlx4_ib_create_flow() 1227 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mlx4_ib_create_flow() 1233 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mlx4_ib_create_flow() 1246 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device); mlx4_ib_destroy_flow() 1328 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw) find_gid_entry() argument 1334 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { find_gid_entry() 1765 struct mlx4_ib_qp *qp; mlx4_ib_update_qps() local 1778 qp = ibdev->qp1_proxy[port - 1]; mlx4_ib_update_qps() 1779 if (qp) { mlx4_ib_update_qps() 1784 mutex_lock(&qp->mutex); mlx4_ib_update_qps() 1785 old_smac = qp->pri.smac; mlx4_ib_update_qps() 1795 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, mlx4_ib_update_qps() 1801 if (qp->pri.smac_port) mlx4_ib_update_qps() 1803 qp->pri.smac = new_smac; mlx4_ib_update_qps() 1804 qp->pri.smac_port = port; mlx4_ib_update_qps() 1805 qp->pri.smac_index = new_smac_index; mlx4_ib_update_qps() 1811 if (qp) mlx4_ib_update_qps() 1812 mutex_unlock(&qp->mutex); mlx4_ib_update_qps() 2594 pr_err("failed to allocate memory for tunneling qp update\n"); do_slave_init() 2601 pr_err("failed to allocate memory for tunneling qp update work struct\n"); do_slave_init() 2639 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ mlx4_ib_handle_catas_error() 984 __mlx4_ib_create_default_rules( struct mlx4_ib_dev *mdev, struct ib_qp *qp, const struct default_rules *pdefault_rules, struct _rule_hw *mlx4_spec) __mlx4_ib_create_default_rules() argument
|
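mlx4_ib_update_qps() above replaces the proxy QP1's source MAC in make-before-break order: register the new MAC, point qp->pri at it, and only then unregister the old one, so the QP is never left transmitting with an unregistered SMAC. A toy model of that ordering, under the assumption that registration can fail (register_mac()/unregister_mac() stand in for the mlx4_register_mac()/mlx4_unregister_mac() firmware calls, and the mutex the driver holds is omitted):

#include <stdint.h>
#include <stdio.h>

struct qp_path { uint64_t smac; int smac_index; };

static int  register_mac(uint64_t mac)   { return (int)(mac & 0x7f); }
static void unregister_mac(uint64_t mac) { (void)mac; }

static int update_smac(struct qp_path *pri, uint64_t new_mac)
{
    int new_index = register_mac(new_mac);
    if (new_index < 0)
        return -1;                /* keep the old registration intact */

    uint64_t old_mac = pri->smac;
    pri->smac = new_mac;          /* hardware context switched here */
    pri->smac_index = new_index;

    if (old_mac)
        unregister_mac(old_mac);  /* release only after the switch */
    return 0;
}

int main(void)
{
    struct qp_path p = { .smac = 0x0002c9112233, .smac_index = 3 };
    update_smac(&p, 0x0002c9445566);
    printf("smac=%llx index=%d\n", (unsigned long long)p.smac, p.smac_index);
    return 0;
}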
H A D | mlx4_ib.h | 393 struct ib_qp *qp; member in struct:mlx4_ib_demux_pv_qp 420 struct mlx4_ib_demux_pv_qp qp[2]; member in struct:mlx4_ib_demux_pv_ctx 660 int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw, 698 int mlx4_ib_destroy_qp(struct ib_qp *qp);
|
H A D | mr.c | 321 int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw, mlx4_ib_bind_mw() argument 336 ret = mlx4_ib_post_send(qp, &wr, &bad_wr); mlx4_ib_bind_mw()
|
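mlx4_ib_bind_mw() above builds a work request and funnels it through mlx4_ib_post_send(), which reports the first rejected request through bad_wr. The user-space verbs API follows the same convention; a sketch of posting one signaled SEND and consuming the bad_wr out-parameter (post_one_send is a hypothetical helper; the qp and sge are assumed to be set up elsewhere):

#include <infiniband/verbs.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Post one signaled SEND; on failure, bad points at the first work
 * request the provider refused, the same contract ib_post_send()
 * honours in the kernel. */
static int post_one_send(struct ibv_qp *qp, struct ibv_sge *sge, uint64_t id)
{
    struct ibv_send_wr wr, *bad = NULL;

    memset(&wr, 0, sizeof(wr));
    wr.wr_id      = id;
    wr.sg_list    = sge;
    wr.num_sge    = 1;
    wr.opcode     = IBV_WR_SEND;
    wr.send_flags = IBV_SEND_SIGNALED;   /* request a completion */

    int ret = ibv_post_send(qp, &wr, &bad);
    if (ret)
        fprintf(stderr, "post failed (%d) at wr_id %llu\n", ret,
                bad ? (unsigned long long)bad->wr_id : 0ULL);
    return ret;
}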
H A D | srq.c | 34 #include <linux/mlx4/qp.h>
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
H A D | Makefile | 3 mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
|
H A D | qp.c | 89 static void *get_wqe(struct mlx5_ib_qp *qp, int offset) get_wqe() argument 91 return mlx5_buf_offset(&qp->buf, offset); get_wqe() 94 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) get_recv_wqe() argument 96 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); get_recv_wqe() 99 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) mlx5_get_send_wqe() argument 101 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); mlx5_get_send_wqe() 107 * @qp: QP to copy from. 121 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, mlx5_ib_read_user_wqe() argument 124 struct ib_device *ibdev = qp->ibqp.device; mlx5_ib_read_user_wqe() 126 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; mlx5_ib_read_user_wqe() 129 struct ib_umem *umem = qp->umem; mlx5_ib_read_user_wqe() 136 qp->ibqp.qp_type); mlx5_ib_read_user_wqe() 175 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) mlx5_ib_qp_event() argument 177 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; mlx5_ib_qp_event() 181 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; mlx5_ib_qp_event() 185 event.element.qp = ibqp; mlx5_ib_qp_event() 212 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); mlx5_ib_qp_event() 221 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) set_rq_size() 233 qp->rq.max_gs = 0; set_rq_size() 234 qp->rq.wqe_cnt = 0; set_rq_size() 235 qp->rq.wqe_shift = 0; set_rq_size() 238 qp->rq.wqe_cnt = ucmd->rq_wqe_count; set_rq_size() 239 qp->rq.wqe_shift = ucmd->rq_wqe_shift; set_rq_size() 240 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; set_rq_size() 241 qp->rq.max_post = qp->rq.wqe_cnt; set_rq_size() 243 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0; set_rq_size() 248 qp->rq.wqe_cnt = wq_size / wqe_size; set_rq_size() 255 qp->rq.wqe_shift = ilog2(wqe_size); set_rq_size() 256 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; set_rq_size() 257 qp->rq.max_post = qp->rq.wqe_cnt; set_rq_size() 331 struct mlx5_ib_qp *qp) calc_sq_size() 352 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) - calc_sq_size() 354 attr->cap.max_inline_data = qp->max_inline_data; calc_sq_size() 357 qp->signature_en = true; calc_sq_size() 360 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; calc_sq_size() 361 if (qp->sq.wqe_cnt > gen->max_wqes) { calc_sq_size() 363 qp->sq.wqe_cnt, gen->max_wqes); calc_sq_size() 366 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); calc_sq_size() 367 qp->sq.max_gs = attr->cap.max_send_sge; calc_sq_size() 368 qp->sq.max_post = wq_size / wqe_size; calc_sq_size() 369 attr->cap.max_send_wr = qp->sq.max_post; calc_sq_size() 375 struct mlx5_ib_qp *qp, set_user_buf_size() 379 int desc_sz = 1 << qp->sq.wqe_shift; set_user_buf_size() 394 qp->sq.wqe_cnt = ucmd->sq_wqe_count; set_user_buf_size() 396 if (qp->sq.wqe_cnt > gen->max_wqes) { set_user_buf_size() 398 qp->sq.wqe_cnt, gen->max_wqes); set_user_buf_size() 402 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + set_user_buf_size() 403 (qp->sq.wqe_cnt << 6); set_user_buf_size() 602 struct mlx5_ib_qp *qp, struct ib_udata *udata, create_user_qp() 645 qp->rq.offset = 0; create_user_qp() 646 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); create_user_qp() 647 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; create_user_qp() 649 err = set_user_buf_size(dev, qp, &ucmd); create_user_qp() 653 if (ucmd.buf_addr && qp->buf_size) { create_user_qp() 654 qp->umem = ib_umem_get(pd->uobject->context, 
ucmd.buf_addr, create_user_qp() 655 qp->buf_size, 0, 0); create_user_qp() 656 if (IS_ERR(qp->umem)) { create_user_qp() 658 err = PTR_ERR(qp->umem); create_user_qp() 662 qp->umem = NULL; create_user_qp() 665 if (qp->umem) { create_user_qp() 666 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, create_user_qp() 674 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); create_user_qp() 683 if (qp->umem) create_user_qp() 684 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); create_user_qp() 691 qp->uuarn = uuarn; create_user_qp() 693 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); create_user_qp() 704 qp->create_type = MLX5_QP_USER; create_user_qp() 709 mlx5_ib_db_unmap_user(context, &qp->db); create_user_qp() 715 if (qp->umem) create_user_qp() 716 ib_umem_release(qp->umem); create_user_qp() 723 static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp) destroy_qp_user() argument 728 mlx5_ib_db_unmap_user(context, &qp->db); destroy_qp_user() 729 if (qp->umem) destroy_qp_user() 730 ib_umem_release(qp->umem); destroy_qp_user() 731 free_uuar(&context->uuari, qp->uuarn); destroy_qp_user() 736 struct mlx5_ib_qp *qp, create_kernel_qp() 758 qp->bf = &uuari->bfs[uuarn]; create_kernel_qp() 759 uar_index = qp->bf->uar->index; create_kernel_qp() 761 err = calc_sq_size(dev, init_attr, qp); create_kernel_qp() 767 qp->rq.offset = 0; create_kernel_qp() 768 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; create_kernel_qp() 769 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); create_kernel_qp() 771 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf); create_kernel_qp() 777 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); create_kernel_qp() 778 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; create_kernel_qp() 786 cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); create_kernel_qp() 791 mlx5_fill_page_array(&qp->buf, (*in)->pas); create_kernel_qp() 793 err = mlx5_db_alloc(dev->mdev, &qp->db); create_kernel_qp() 799 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL); create_kernel_qp() 800 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL); create_kernel_qp() 801 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL); create_kernel_qp() 802 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL); create_kernel_qp() 803 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL); create_kernel_qp() 805 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || create_kernel_qp() 806 !qp->sq.w_list || !qp->sq.wqe_head) { create_kernel_qp() 810 qp->create_type = MLX5_QP_KERNEL; create_kernel_qp() 815 mlx5_db_free(dev->mdev, &qp->db); create_kernel_qp() 816 kfree(qp->sq.wqe_head); create_kernel_qp() 817 kfree(qp->sq.w_list); create_kernel_qp() 818 kfree(qp->sq.wrid); create_kernel_qp() 819 kfree(qp->sq.wr_data); create_kernel_qp() 820 kfree(qp->rq.wrid); create_kernel_qp() 826 mlx5_buf_free(dev->mdev, &qp->buf); create_kernel_qp() 833 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) destroy_qp_kernel() argument 835 mlx5_db_free(dev->mdev, &qp->db); destroy_qp_kernel() 836 kfree(qp->sq.wqe_head); destroy_qp_kernel() 837 kfree(qp->sq.w_list); destroy_qp_kernel() 838 kfree(qp->sq.wrid); destroy_qp_kernel() 839 kfree(qp->sq.wr_data); destroy_qp_kernel() 840 kfree(qp->rq.wrid); destroy_qp_kernel() 841 mlx5_buf_free(dev->mdev, &qp->buf); destroy_qp_kernel() 842 
free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); destroy_qp_kernel() 845 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) get_rx_type() argument 850 else if (!qp->has_rq) get_rx_type() 866 struct ib_udata *udata, struct mlx5_ib_qp *qp) create_qp_common() 876 mlx5_ib_odp_create_qp(qp); create_qp_common() 879 mutex_init(&qp->mutex); create_qp_common() 880 spin_lock_init(&qp->sq.lock); create_qp_common() 881 spin_lock_init(&qp->rq.lock); create_qp_common() 888 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; create_qp_common() 893 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; create_qp_common() 901 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE); create_qp_common() 902 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE); create_qp_common() 904 qp->wq_sig = !!wq_signature; create_qp_common() 907 qp->has_rq = qp_has_rq(init_attr); create_qp_common() 908 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, create_qp_common() 909 qp, (pd && pd->uobject) ? &ucmd : NULL); create_qp_common() 918 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || create_qp_common() 919 ucmd.rq_wqe_count != qp->rq.wqe_cnt) { create_qp_common() 928 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); create_qp_common() 932 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); create_qp_common() 936 qp->pa_lkey = to_mpd(pd)->pa_lkey; create_qp_common() 946 qp->create_type = MLX5_QP_EMPTY; create_qp_common() 950 qp->port = init_attr->port_num; create_qp_common() 960 if (qp->wq_sig) create_qp_common() 963 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) create_qp_common() 966 if (qp->scat_cqe && is_connected(init_attr->qp_type)) { create_qp_common() 986 if (qp->rq.wqe_cnt) { create_qp_common() 987 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); create_qp_common() 988 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; create_qp_common() 991 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr); create_qp_common() 993 if (qp->sq.wqe_cnt) create_qp_common() 994 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); create_qp_common() 1027 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); create_qp_common() 1029 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen); create_qp_common() 1031 mlx5_ib_dbg(dev, "create qp failed\n"); create_qp_common() 1040 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); create_qp_common() 1042 qp->mqp.event = mlx5_ib_qp_event; create_qp_common() 1047 if (qp->create_type == MLX5_QP_USER) create_qp_common() 1048 destroy_qp_user(pd, qp); create_qp_common() 1049 else if (qp->create_type == MLX5_QP_KERNEL) create_qp_common() 1050 destroy_qp_kernel(dev, qp); create_qp_common() 1114 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp) get_pd() argument 1116 return to_mpd(qp->ibqp.pd); get_pd() 1119 static void get_cqs(struct mlx5_ib_qp *qp, get_cqs() argument 1122 switch (qp->ibqp.qp_type) { get_cqs() 1129 *send_cq = to_mcq(qp->ibqp.send_cq); get_cqs() 1140 *send_cq = to_mcq(qp->ibqp.send_cq); get_cqs() 1141 *recv_cq = to_mcq(qp->ibqp.recv_cq); get_cqs() 1153 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) destroy_qp_common() argument 1163 if (qp->state != IB_QPS_RESET) { destroy_qp_common() 1164 mlx5_ib_qp_disable_pagefaults(qp); destroy_qp_common() 1165 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state), destroy_qp_common() 1166 MLX5_QP_STATE_RST, in, 0, &qp->mqp)) destroy_qp_common() 1168 qp->mqp.qpn); destroy_qp_common() 1171 get_cqs(qp, &send_cq, &recv_cq); destroy_qp_common() 1173 if 
(qp->create_type == MLX5_QP_KERNEL) { destroy_qp_common() 1175 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, destroy_qp_common() 1176 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); destroy_qp_common() 1178 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); destroy_qp_common() 1182 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp); destroy_qp_common() 1184 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); destroy_qp_common() 1188 if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_common() 1189 destroy_qp_kernel(dev, qp); destroy_qp_common() 1190 else if (qp->create_type == MLX5_QP_USER) destroy_qp_common() 1191 destroy_qp_user(&get_pd(qp)->ibpd, qp); destroy_qp_common() 1231 struct mlx5_ib_qp *qp; mlx5_ib_create_qp() local 1269 qp = kzalloc(sizeof(*qp), GFP_KERNEL); mlx5_ib_create_qp() 1270 if (!qp) mlx5_ib_create_qp() 1273 err = create_qp_common(dev, pd, init_attr, udata, qp); mlx5_ib_create_qp() 1276 kfree(qp); mlx5_ib_create_qp() 1281 qp->ibqp.qp_num = 0; mlx5_ib_create_qp() 1283 qp->ibqp.qp_num = 1; mlx5_ib_create_qp() 1285 qp->ibqp.qp_num = qp->mqp.qpn; mlx5_ib_create_qp() 1288 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn, mlx5_ib_create_qp() 1291 qp->xrcdn = xrcdn; mlx5_ib_create_qp() 1300 mlx5_ib_dbg(dev, "unsupported qp type %d\n", mlx5_ib_create_qp() 1306 return &qp->ibqp; mlx5_ib_create_qp() 1309 int mlx5_ib_destroy_qp(struct ib_qp *qp) mlx5_ib_destroy_qp() argument 1311 struct mlx5_ib_dev *dev = to_mdev(qp->device); mlx5_ib_destroy_qp() 1312 struct mlx5_ib_qp *mqp = to_mqp(qp); mlx5_ib_destroy_qp() 1321 static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, to_mlx5_access_flags() argument 1331 dest_rd_atomic = qp->resp_depth; to_mlx5_access_flags() 1336 access_flags = qp->atomic_rd_en; to_mlx5_access_flags() 1570 struct mlx5_ib_qp *qp = to_mqp(ibqp); __mlx5_ib_modify_qp() local 1634 context->pri_path.port = qp->port; __mlx5_ib_modify_qp() 1641 attr_mask & IB_QP_PORT ? attr->port_num : qp->port, __mlx5_ib_modify_qp() 1657 pd = get_pd(qp); __mlx5_ib_modify_qp() 1658 get_cqs(qp, &send_cq, &recv_cq); __mlx5_ib_modify_qp() 1687 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask); __mlx5_ib_modify_qp() 1698 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) __mlx5_ib_modify_qp() 1699 context->db_rec_addr = cpu_to_be64(qp->db.dma); __mlx5_ib_modify_qp() 1724 mlx5_ib_qp_disable_pagefaults(qp); __mlx5_ib_modify_qp() 1731 &qp->mqp); __mlx5_ib_modify_qp() 1736 mlx5_ib_qp_enable_pagefaults(qp); __mlx5_ib_modify_qp() 1738 qp->state = new_state; __mlx5_ib_modify_qp() 1741 qp->atomic_rd_en = attr->qp_access_flags; __mlx5_ib_modify_qp() 1743 qp->resp_depth = attr->max_dest_rd_atomic; __mlx5_ib_modify_qp() 1745 qp->port = attr->port_num; __mlx5_ib_modify_qp() 1747 qp->alt_port = attr->alt_port_num; __mlx5_ib_modify_qp() 1754 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, __mlx5_ib_modify_qp() 1757 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); __mlx5_ib_modify_qp() 1759 qp->rq.head = 0; __mlx5_ib_modify_qp() 1760 qp->rq.tail = 0; __mlx5_ib_modify_qp() 1761 qp->sq.head = 0; __mlx5_ib_modify_qp() 1762 qp->sq.tail = 0; __mlx5_ib_modify_qp() 1763 qp->sq.cur_post = 0; __mlx5_ib_modify_qp() 1764 qp->sq.last_poll = 0; __mlx5_ib_modify_qp() 1765 qp->db.db[MLX5_RCV_DBR] = 0; __mlx5_ib_modify_qp() 1766 qp->db.db[MLX5_SND_DBR] = 0; __mlx5_ib_modify_qp() 1778 struct mlx5_ib_qp *qp = to_mqp(ibqp); mlx5_ib_modify_qp() local 1785 mutex_lock(&qp->mutex); mlx5_ib_modify_qp() 1787 cur_state = attr_mask & IB_QP_CUR_STATE ? 
attr->cur_qp_state : qp->state; mlx5_ib_modify_qp() 1800 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; mlx5_ib_modify_qp() 1821 mutex_unlock(&qp->mutex); mlx5_ib_modify_qp() 2093 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, set_data_inl_seg() argument 2097 void *qend = qp->sq.qend; set_data_inl_seg() 2111 if (unlikely(inl > qp->max_inline_data)) set_data_inl_seg() 2119 wqe = mlx5_get_send_wqe(qp, 0); set_data_inl_seg() 2239 static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, set_sig_data_segment() argument 2324 if (unlikely((*seg == qp->sq.qend))) set_sig_data_segment() 2325 *seg = mlx5_get_send_wqe(qp, 0); set_sig_data_segment() 2334 if (unlikely((*seg == qp->sq.qend))) set_sig_data_segment() 2335 *seg = mlx5_get_send_wqe(qp, 0); set_sig_data_segment() 2372 static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, set_sig_umr_wr() argument 2376 u32 pdn = get_pd(qp)->pdn; set_sig_umr_wr() 2383 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || set_sig_umr_wr() 2405 if (unlikely((*seg == qp->sq.qend))) set_sig_umr_wr() 2406 *seg = mlx5_get_send_wqe(qp, 0); set_sig_umr_wr() 2411 if (unlikely((*seg == qp->sq.qend))) set_sig_umr_wr() 2412 *seg = mlx5_get_send_wqe(qp, 0); set_sig_umr_wr() 2414 ret = set_sig_data_segment(wr, qp, seg, size); set_sig_umr_wr() 2449 struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp) set_frwr_li_wr() 2461 if (unlikely((*seg == qp->sq.qend))) set_frwr_li_wr() 2462 *seg = mlx5_get_send_wqe(qp, 0); set_frwr_li_wr() 2466 if (unlikely((*seg == qp->sq.qend))) set_frwr_li_wr() 2467 *seg = mlx5_get_send_wqe(qp, 0); set_frwr_li_wr() 2480 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) dump_wqe() argument 2486 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); dump_wqe() 2489 void *buf = mlx5_get_send_wqe(qp, tidx); dump_wqe() 2490 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); dump_wqe() 2501 unsigned bytecnt, struct mlx5_ib_qp *qp) mlx5_bf_copy() 2513 if (unlikely(src == qp->sq.qend)) mlx5_bf_copy() 2514 src = mlx5_get_send_wqe(qp, 0); mlx5_bf_copy() 2535 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, begin_wqe() argument 2542 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { begin_wqe() 2547 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); begin_wqe() 2548 *seg = mlx5_get_send_wqe(qp, *idx); begin_wqe() 2552 (*ctrl)->fm_ce_se = qp->sq_signal_bits | begin_wqe() 2564 static void finish_wqe(struct mlx5_ib_qp *qp, finish_wqe() argument 2572 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | finish_wqe() 2574 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8)); finish_wqe() 2576 qp->fm_cache = next_fence; finish_wqe() 2577 if (unlikely(qp->wq_sig)) finish_wqe() 2580 qp->sq.wrid[idx] = wr_id; finish_wqe() 2581 qp->sq.w_list[idx].opcode = mlx5_opcode; finish_wqe() 2582 qp->sq.wqe_head[idx] = qp->sq.head + nreq; finish_wqe() 2583 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); finish_wqe() 2584 qp->sq.w_list[idx].next = qp->sq.cur_post; finish_wqe() 2594 struct mlx5_ib_qp *qp = to_mqp(ibqp); mlx5_ib_post_send() local 2598 struct mlx5_bf *bf = qp->bf; mlx5_ib_post_send() 2600 void *qend = qp->sq.qend; mlx5_ib_post_send() 2612 spin_lock_irqsave(&qp->sq.lock, flags); mlx5_ib_post_send() 2622 fence = qp->fm_cache; mlx5_ib_post_send() 2624 if (unlikely(num_sge > qp->sq.max_gs)) { mlx5_ib_post_send() 2631 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); mlx5_ib_post_send() 2667 
qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; mlx5_ib_post_send() 2669 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); mlx5_ib_post_send() 2680 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR; mlx5_ib_post_send() 2682 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); mlx5_ib_post_send() 2692 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; mlx5_ib_post_send() 2696 err = set_sig_umr_wr(wr, qp, &seg, &size); mlx5_ib_post_send() 2703 finish_wqe(qp, ctrl, size, idx, wr->wr_id, mlx5_ib_post_send() 2712 err = begin_wqe(qp, &seg, &ctrl, wr, mlx5_ib_post_send() 2730 finish_wqe(qp, ctrl, size, idx, wr->wr_id, mlx5_ib_post_send() 2733 err = begin_wqe(qp, &seg, &ctrl, wr, mlx5_ib_post_send() 2752 finish_wqe(qp, ctrl, size, idx, wr->wr_id, mlx5_ib_post_send() 2785 seg = mlx5_get_send_wqe(qp, 0); mlx5_ib_post_send() 2794 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; mlx5_ib_post_send() 2800 seg = mlx5_get_send_wqe(qp, 0); mlx5_ib_post_send() 2805 seg = mlx5_get_send_wqe(qp, 0); mlx5_ib_post_send() 2815 err = set_data_inl_seg(qp, wr, seg, &sz); mlx5_ib_post_send() 2827 seg = mlx5_get_send_wqe(qp, 0); mlx5_ib_post_send() 2838 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, mlx5_ib_post_send() 2843 dump_wqe(qp, idx, size); mlx5_ib_post_send() 2848 qp->sq.head += nreq; mlx5_ib_post_send() 2855 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); mlx5_ib_post_send() 2868 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); mlx5_ib_post_send() 2885 spin_unlock_irqrestore(&qp->sq.lock, flags); mlx5_ib_post_send() 2898 struct mlx5_ib_qp *qp = to_mqp(ibqp); mlx5_ib_post_recv() local 2907 spin_lock_irqsave(&qp->rq.lock, flags); mlx5_ib_post_recv() 2909 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); mlx5_ib_post_recv() 2912 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mlx5_ib_post_recv() 2918 if (unlikely(wr->num_sge > qp->rq.max_gs)) { mlx5_ib_post_recv() 2924 scat = get_recv_wqe(qp, ind); mlx5_ib_post_recv() 2925 if (qp->wq_sig) mlx5_ib_post_recv() 2931 if (i < qp->rq.max_gs) { mlx5_ib_post_recv() 2937 if (qp->wq_sig) { mlx5_ib_post_recv() 2939 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); mlx5_ib_post_recv() 2942 qp->rq.wrid[ind] = wr->wr_id; mlx5_ib_post_recv() 2944 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); mlx5_ib_post_recv() 2949 qp->rq.head += nreq; mlx5_ib_post_recv() 2956 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); mlx5_ib_post_recv() 2959 spin_unlock_irqrestore(&qp->rq.lock, flags); mlx5_ib_post_recv() 3037 struct mlx5_ib_qp *qp = to_mqp(ibqp); mlx5_ib_query_qp() local 3051 mutex_lock(&qp->mutex); mlx5_ib_query_qp() 3058 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb)); mlx5_ib_query_qp() 3064 qp->state = to_ib_qp_state(mlx5_state); mlx5_ib_query_qp() 3065 qp_attr->qp_state = qp->state; mlx5_ib_query_qp() 3076 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { mlx5_ib_query_qp() 3086 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ mlx5_ib_query_qp() 3100 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; mlx5_ib_query_qp() 3101 qp_attr->cap.max_recv_sge = qp->rq.max_gs; mlx5_ib_query_qp() 3104 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; mlx5_ib_query_qp() 3105 qp_attr->cap.max_send_sge = qp->sq.max_gs; mlx5_ib_query_qp() 3119 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) mlx5_ib_query_qp() 3122 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? 
mlx5_ib_query_qp() 3129 mutex_unlock(&qp->mutex); mlx5_ib_query_qp() 220 set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) set_rq_size() argument 330 calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, struct mlx5_ib_qp *qp) calc_sq_size() argument 374 set_user_buf_size(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) set_user_buf_size() argument 601 create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_qp *qp, struct ib_udata *udata, struct mlx5_create_qp_mbox_in **in, struct mlx5_ib_create_qp_resp *resp, int *inlen) create_user_qp() argument 734 create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *init_attr, struct mlx5_ib_qp *qp, struct mlx5_create_qp_mbox_in **in, int *inlen) create_kernel_qp() argument 864 create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, struct mlx5_ib_qp *qp) create_qp_common() argument 2448 set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp) set_frwr_li_wr() argument 2500 mlx5_bf_copy(u64 __iomem *dst, u64 *src, unsigned bytecnt, struct mlx5_ib_qp *qp) mlx5_bf_copy() argument
|
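In the mlx5 send path above, begin_wqe()/finish_wqe() treat the send queue as a ring of 64-byte basic blocks: the slot index is cur_post masked by (wqe_cnt - 1), and cur_post advances by DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB), so a large WQE consumes several blocks while w_list[].next records where the following WQE begins. A toy model of that accounting, assuming only standard C (struct sq and post_wqe are illustrative, not the driver's types):

#include <stdint.h>
#include <stdio.h>

#define WQE_CNT 64        /* power of two, as the driver requires */
#define WQE_BB  64        /* one basic block, like MLX5_SEND_WQE_BB */

struct sq {
    uint64_t wrid[WQE_CNT];
    uint16_t w_next[WQE_CNT];   /* models sq.w_list[].next */
    uint32_t cur_post;          /* counts basic blocks, free-running */
};

/* Post a WQE of 'size16' 16-byte units; it occupies
 * ceil(size16 * 16 / WQE_BB) basic blocks. */
static unsigned post_wqe(struct sq *sq, uint64_t wr_id, int size16)
{
    unsigned idx = sq->cur_post & (WQE_CNT - 1);
    sq->wrid[idx] = wr_id;
    sq->cur_post += (unsigned)(size16 * 16 + WQE_BB - 1) / WQE_BB;
    sq->w_next[idx] = (uint16_t)(sq->cur_post & 0xffff);  /* next WQE's slot */
    return idx;
}

int main(void)
{
    struct sq sq = {0};
    /* A 4-unit WQE fits one block; a 9-unit WQE spans three. */
    printf("idx=%u\n", post_wqe(&sq, 1, 4));   /* idx=0, advance by 1 */
    printf("idx=%u\n", post_wqe(&sq, 2, 9));   /* idx=1, advance by 3 */
    printf("idx=%u\n", post_wqe(&sq, 3, 4));   /* idx=4               */
    return 0;
}

This is also why the wraparound checks in mlx5_ib_post_send() above reset seg to mlx5_get_send_wqe(qp, 0) mid-WQE: a multi-block WQE may straddle the end of the buffer.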
H A D | odp.c | 161 static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp, mlx5_ib_page_fault_resume() argument 164 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); mlx5_ib_page_fault_resume() 165 int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn, mlx5_ib_page_fault_resume() 170 qp->mqp.qpn); mlx5_ib_page_fault_resume() 185 static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, pagefault_single_data_segment() argument 190 struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device); pagefault_single_data_segment() 219 if (mr->ibmr.pd != qp->ibqp.pd) { pagefault_single_data_segment() 302 * @qp the QP on which the fault occurred. 317 static int pagefault_data_segments(struct mlx5_ib_qp *qp, pagefault_data_segments() argument 330 if (receive_queue && qp->ibqp.srq) pagefault_data_segments() 376 ret = pagefault_single_data_segment(qp, pfault, key, io_virt, pagefault_data_segments() 391 struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, mlx5_ib_mr_initiator_pfault_handler() 394 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); mlx5_ib_mr_initiator_pfault_handler() 411 wqe_index, qp->mqp.qpn); mlx5_ib_mr_initiator_pfault_handler() 421 wqe_index, qp->mqp.qpn, mlx5_ib_mr_initiator_pfault_handler() 428 if (qp->mqp.qpn != ctrl_qpn) { mlx5_ib_mr_initiator_pfault_handler() 430 wqe_index, qp->mqp.qpn, mlx5_ib_mr_initiator_pfault_handler() 441 switch (qp->ibqp.qp_type) { mlx5_ib_mr_initiator_pfault_handler() 484 qp->ibqp.qp_type, opcode); mlx5_ib_mr_initiator_pfault_handler() 496 struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, mlx5_ib_mr_responder_pfault_handler() 499 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); mlx5_ib_mr_responder_pfault_handler() 500 struct mlx5_ib_wq *wq = &qp->rq; mlx5_ib_mr_responder_pfault_handler() 503 if (qp->ibqp.srq) { mlx5_ib_mr_responder_pfault_handler() 508 if (qp->wq_sig) { mlx5_ib_mr_responder_pfault_handler() 518 switch (qp->ibqp.qp_type) { mlx5_ib_mr_responder_pfault_handler() 527 qp->ibqp.qp_type); mlx5_ib_mr_responder_pfault_handler() 536 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp, mlx5_ib_mr_wqe_pfault_handler() argument 539 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device); mlx5_ib_mr_wqe_pfault_handler() 555 ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer, mlx5_ib_mr_wqe_pfault_handler() 559 -ret, wqe_index, qp->mqp.qpn); mlx5_ib_mr_wqe_pfault_handler() 566 ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe, mlx5_ib_mr_wqe_pfault_handler() 569 ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe, mlx5_ib_mr_wqe_pfault_handler() 582 ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped, mlx5_ib_mr_wqe_pfault_handler() 594 mlx5_ib_page_fault_resume(qp, pfault, resume_with_error); mlx5_ib_mr_wqe_pfault_handler() 596 qp->mqp.qpn, resume_with_error, pfault->mpfault.flags); mlx5_ib_mr_wqe_pfault_handler() 607 static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp, mlx5_ib_mr_rdma_pfault_handler() argument 645 ret = pagefault_single_data_segment(qp, pfault, rkey, address, length, mlx5_ib_mr_rdma_pfault_handler() 651 mlx5_ib_page_fault_resume(qp, pfault, 1); mlx5_ib_mr_rdma_pfault_handler() 655 mlx5_ib_page_fault_resume(qp, pfault, 0); mlx5_ib_mr_rdma_pfault_handler() 663 ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey, mlx5_ib_mr_rdma_pfault_handler() 670 qp->ibqp.qp_num, address, prefetch_len); mlx5_ib_mr_rdma_pfault_handler() 675 void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp, mlx5_ib_mr_pfault_handler() argument 682 
mlx5_ib_mr_wqe_pfault_handler(qp, pfault); mlx5_ib_mr_pfault_handler() 685 mlx5_ib_mr_rdma_pfault_handler(qp, pfault); mlx5_ib_mr_pfault_handler() 690 mlx5_ib_page_fault_resume(qp, pfault, 1); mlx5_ib_mr_pfault_handler() 702 struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp, mlx5_ib_qp_pfault_action() local 704 mlx5_ib_mr_pfault_handler(qp, pfault); mlx5_ib_qp_pfault_action() 707 void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) mlx5_ib_qp_disable_pagefaults() argument 711 spin_lock_irqsave(&qp->disable_page_faults_lock, flags); mlx5_ib_qp_disable_pagefaults() 712 qp->disable_page_faults = 1; mlx5_ib_qp_disable_pagefaults() 713 spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags); mlx5_ib_qp_disable_pagefaults() 723 void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) mlx5_ib_qp_enable_pagefaults() argument 727 spin_lock_irqsave(&qp->disable_page_faults_lock, flags); mlx5_ib_qp_enable_pagefaults() 728 qp->disable_page_faults = 0; mlx5_ib_qp_enable_pagefaults() 729 spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags); mlx5_ib_qp_enable_pagefaults() 732 static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp, mlx5_ib_pfault_handler() argument 742 struct mlx5_ib_qp *mibqp = to_mibqp(qp); mlx5_ib_pfault_handler() 756 void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) mlx5_ib_odp_create_qp() argument 760 qp->disable_page_faults = 1; mlx5_ib_odp_create_qp() 761 spin_lock_init(&qp->disable_page_faults_lock); mlx5_ib_odp_create_qp() 763 qp->mqp.pfault_handler = mlx5_ib_pfault_handler; mlx5_ib_odp_create_qp() 766 INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action); mlx5_ib_odp_create_qp() 390 mlx5_ib_mr_initiator_pfault_handler( struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, void **wqe, void **wqe_end, int wqe_length) mlx5_ib_mr_initiator_pfault_handler() argument 495 mlx5_ib_mr_responder_pfault_handler( struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, void **wqe, void **wqe_end, int wqe_length) mlx5_ib_mr_responder_pfault_handler() argument
|
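The odp.c hits above trace a single control flow: mlx5_ib_mr_wqe_pfault_handler() copies the faulting WQE out of user memory with mlx5_ib_read_user_wqe(), hands it to the initiator or responder parser to locate the data segments, faults those pages in via pagefault_data_segments(), and finally acknowledges the fault through mlx5_ib_page_fault_resume(). A condensed sketch of that dispatch follows; the bounce-buffer size, the MLX5_PFAULT_REQUESTOR flag test, the mpfault.wqe.wqe_index field, and the trailing pagefault_data_segments() arguments are assumptions filled in around what the excerpt shows.

    static void wqe_pfault_sketch(struct mlx5_ib_qp *qp,
                                  struct mlx5_ib_pfault *pfault)
    {
        char buf[1024];                  /* bounce buffer for the user WQE */
        void *wqe = buf, *wqe_end = buf;
        int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
        int resume_with_error = 0;
        int ret;

        /* Copy the faulting WQE out of the user's send or receive queue. */
        ret = mlx5_ib_read_user_wqe(qp, requestor,
                                    pfault->mpfault.wqe.wqe_index,
                                    buf, sizeof(buf));
        if (ret < 0) {
            resume_with_error = 1;
            goto resolve;
        }

        /* Locate the data segments, depending on which side faulted. */
        ret = requestor ?
              mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
                                                  &wqe_end, ret) :
              mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
                                                  &wqe_end, ret);
        if (ret)
            resume_with_error = 1;
        else if (pagefault_data_segments(qp, pfault, wqe, wqe_end,
                                         NULL, 0) < 0)
            resume_with_error = 1;   /* fault in every SGE the WQE names */

    resolve:
        /* Ask the HCA to replay the WQE, or to complete it in error. */
        mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
    }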
H A D | mlx5_ib.h | 42 #include <linux/mlx5/qp.h> 193 /* serialize qp state modifications 351 struct ib_qp *qp; member in struct:umr_common 551 int mlx5_ib_destroy_qp(struct ib_qp *qp); 556 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n); 557 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, 621 void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp, 623 void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp); 628 void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp); 629 void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp); 639 static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {} mlx5_ib_odp_create_qp() argument 644 static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {} mlx5_ib_qp_disable_pagefaults() argument 645 static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {} mlx5_ib_qp_enable_pagefaults() argument
|
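The header hits above pair each ODP prototype (lines 621-629) with an empty static inline body (lines 639-645): the usual Kconfig stub pattern, which lets callers invoke the ODP entry points unconditionally while a no-ODP build compiles them away. The shape with the guard written out; the symbol CONFIG_INFINIBAND_ON_DEMAND_PAGING is the customary Kconfig name and is assumed here, since the excerpt does not show the #ifdef itself.

    #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
    void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
    void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
    void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
    #else
    /* ODP compiled out: every entry point becomes a no-op inline. */
    static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
    static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
    static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {}
    #endif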
H A D | cq.c | 171 struct mlx5_ib_qp *qp) handle_responder() 173 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); handle_responder() 179 if (qp->ibqp.srq || qp->ibqp.xrcd) { handle_responder() 182 if (qp->ibqp.xrcd) { handle_responder() 187 srq = to_msrq(qp->ibqp.srq); handle_responder() 197 wq = &qp->rq; handle_responder() 304 static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx) is_atomic_response() argument 311 static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx) mlx5_get_atomic_laddr() argument 316 dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) + mlx5_get_atomic_laddr() 323 static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, handle_atomic() argument 330 if (!is_atomic_response(qp, idx)) handle_atomic() 334 addr = mlx5_get_atomic_laddr(qp, idx); handle_atomic() 348 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, handle_atomics() argument 354 idx = tail & (qp->sq.wqe_cnt - 1); handle_atomics() 355 handle_atomic(qp, cqe64, idx); handle_atomics() 359 tail = qp->sq.w_list[idx].next; handle_atomics() 361 tail = qp->sq.w_list[idx].next; handle_atomics() 362 qp->sq.last_poll = tail; handle_atomics() 463 wc->qp = &(*cur_qp)->ibqp; mlx5_poll_one() 170 handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_ib_qp *qp) handle_responder() argument
|
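handle_atomics() above walks the send queue from sq.last_poll to the WQE completed by the CQE, following the sq.w_list next links and masking every index with (wqe_cnt - 1), the usual power-of-two ring idiom. A compacted restatement of that walk; the loop structure is simplified relative to the driver, and the completed parameter stands in for the index carried by the CQE.

    static void handle_atomics_sketch(struct mlx5_ib_qp *qp,
                                      struct mlx5_cqe64 *cqe64, u16 completed)
    {
        u16 tail = qp->sq.last_poll;
        u16 idx;

        do {
            idx = tail & (qp->sq.wqe_cnt - 1); /* wqe_cnt: power of two  */
            handle_atomic(qp, cqe64, idx);     /* patch atomic result in */
            tail = qp->sq.w_list[idx].next;    /* follow post-order link */
        } while (idx != completed);

        qp->sq.last_poll = tail;
    }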
H A D | main.c | 956 mlx5_ib_destroy_qp(dev->umrc.qp); destroy_umrc_res() 972 struct ib_qp *qp; create_umr_res() local 1013 qp = mlx5_ib_create_qp(pd, init_attr, NULL); create_umr_res() 1014 if (IS_ERR(qp)) { create_umr_res() 1016 ret = PTR_ERR(qp); create_umr_res() 1019 qp->device = &dev->ib_dev; create_umr_res() 1020 qp->real_qp = qp; create_umr_res() 1021 qp->uobject = NULL; create_umr_res() 1022 qp->qp_type = MLX5_IB_QPT_REG_UMR; create_umr_res() 1026 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | create_umr_res() 1037 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); create_umr_res() 1045 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); create_umr_res() 1051 dev->umrc.qp = qp; create_umr_res() 1069 mlx5_ib_destroy_qp(qp); create_umr_res()
|
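create_umr_res() above is the standard bring-up of a kernel-owned QP: allocate it, then walk it RESET to INIT to RTR to RTS with three modify-QP calls. Condensed below; the non-state attr fields, the re-initialization of attr between transitions, and all error unwinding are elided, and MLX5_IB_QPT_REG_UMR is the driver-private type visible at line 1022.

    static int umr_qp_to_rts(struct ib_qp *qp, struct ib_qp_attr *attr)
    {
        int ret;

        attr->qp_state = IB_QPS_INIT;
        ret = mlx5_ib_modify_qp(qp, attr,
                                IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT,
                                NULL);
        if (ret)
            return ret;

        attr->qp_state = IB_QPS_RTR;
        ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
        if (ret)
            return ret;

        attr->qp_state = IB_QPS_RTS;
        return mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
    }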
H A D | mr.c | 812 err = ib_post_send(umrc->qp, &wr, &bad); reg_umr() 943 err = ib_post_send(umrc->qp, &wr, &bad); mlx5_ib_update_mtt() 1139 err = ib_post_send(umrc->qp, &wr, &bad); unreg_umr()
|
H A D | srq.c | 34 #include <linux/mlx5/qp.h>
|
/linux-4.1.27/drivers/ntb/ |
H A D | ntb_transport.c | 84 struct ntb_transport_qp *qp; member in struct:ntb_queue_entry 109 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, 119 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 203 #define QP_TO_MW(ndev, qp) ((qp) % ntb_max_mw(ndev)) 398 struct ntb_transport_qp *qp; debugfs_read() local 408 qp = filp->private_data; debugfs_read() 413 "rx_bytes - \t%llu\n", qp->rx_bytes); debugfs_read() 415 "rx_pkts - \t%llu\n", qp->rx_pkts); debugfs_read() 417 "rx_memcpy - \t%llu\n", qp->rx_memcpy); debugfs_read() 419 "rx_async - \t%llu\n", qp->rx_async); debugfs_read() 421 "rx_ring_empty - %llu\n", qp->rx_ring_empty); debugfs_read() 423 "rx_err_no_buf - %llu\n", qp->rx_err_no_buf); debugfs_read() 425 "rx_err_oflow - \t%llu\n", qp->rx_err_oflow); debugfs_read() 427 "rx_err_ver - \t%llu\n", qp->rx_err_ver); debugfs_read() 429 "rx_buff - \t%p\n", qp->rx_buff); debugfs_read() 431 "rx_index - \t%u\n", qp->rx_index); debugfs_read() 433 "rx_max_entry - \t%u\n", qp->rx_max_entry); debugfs_read() 436 "tx_bytes - \t%llu\n", qp->tx_bytes); debugfs_read() 438 "tx_pkts - \t%llu\n", qp->tx_pkts); debugfs_read() 440 "tx_memcpy - \t%llu\n", qp->tx_memcpy); debugfs_read() 442 "tx_async - \t%llu\n", qp->tx_async); debugfs_read() 444 "tx_ring_full - \t%llu\n", qp->tx_ring_full); debugfs_read() 446 "tx_err_no_buf - %llu\n", qp->tx_err_no_buf); debugfs_read() 448 "tx_mw - \t%p\n", qp->tx_mw); debugfs_read() 450 "tx_index - \t%u\n", qp->tx_index); debugfs_read() 452 "tx_max_entry - \t%u\n", qp->tx_max_entry); debugfs_read() 455 "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ? debugfs_read() 503 struct ntb_transport_qp *qp = &nt->qps[qp_num]; ntb_transport_setup_qp_mw() local 519 qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size; ntb_transport_setup_qp_mw() 522 qp->remote_rx_info = qp->rx_buff + rx_size; ntb_transport_setup_qp_mw() 525 qp->rx_max_frame = min(transport_mtu, rx_size / 2); ntb_transport_setup_qp_mw() 526 qp->rx_max_entry = rx_size / qp->rx_max_frame; ntb_transport_setup_qp_mw() 527 qp->rx_index = 0; ntb_transport_setup_qp_mw() 529 qp->remote_rx_info->entry = qp->rx_max_entry - 1; ntb_transport_setup_qp_mw() 532 for (i = 0; i < qp->rx_max_entry; i++) { ntb_transport_setup_qp_mw() 533 void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) - ntb_transport_setup_qp_mw() 538 qp->rx_pkts = 0; ntb_transport_setup_qp_mw() 539 qp->tx_pkts = 0; ntb_transport_setup_qp_mw() 540 qp->tx_index = 0; ntb_transport_setup_qp_mw() 598 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) ntb_qp_link_cleanup() argument 600 struct ntb_transport *nt = qp->transport; ntb_qp_link_cleanup() 603 if (qp->qp_link == NTB_LINK_DOWN) { ntb_qp_link_cleanup() 604 cancel_delayed_work_sync(&qp->link_work); ntb_qp_link_cleanup() 608 if (qp->event_handler) ntb_qp_link_cleanup() 609 qp->event_handler(qp->cb_data, NTB_LINK_DOWN); ntb_qp_link_cleanup() 611 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num); ntb_qp_link_cleanup() 612 qp->qp_link = NTB_LINK_DOWN; ntb_qp_link_cleanup() 617 struct ntb_transport_qp *qp = container_of(work, ntb_qp_link_cleanup_work() local 620 struct ntb_transport *nt = qp->transport; ntb_qp_link_cleanup_work() 622 ntb_qp_link_cleanup(qp); ntb_qp_link_cleanup_work() 625 schedule_delayed_work(&qp->link_work, ntb_qp_link_cleanup_work() 629 static void ntb_qp_link_down(struct ntb_transport_qp *qp) ntb_qp_link_down() argument 631 schedule_work(&qp->link_cleanup); ntb_qp_link_down() 793 struct ntb_transport_qp *qp = &nt->qps[i]; ntb_transport_link_work() 
local 797 if (qp->client_ready == NTB_LINK_UP) ntb_transport_link_work() 798 schedule_delayed_work(&qp->link_work, 0); ntb_transport_link_work() 814 struct ntb_transport_qp *qp = container_of(work, ntb_qp_link_work() local 817 struct pci_dev *pdev = ntb_query_pdev(qp->ndev); ntb_qp_link_work() 818 struct ntb_transport *nt = qp->transport; ntb_qp_link_work() 829 rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num); ntb_qp_link_work() 832 val | 1 << qp->qp_num, QP_LINKS); ntb_qp_link_work() 834 /* query remote spad for qp ready bits */ ntb_qp_link_work() 842 if (1 << qp->qp_num & val) { ntb_qp_link_work() 843 qp->qp_link = NTB_LINK_UP; ntb_qp_link_work() 845 dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num); ntb_qp_link_work() 846 if (qp->event_handler) ntb_qp_link_work() 847 qp->event_handler(qp->cb_data, NTB_LINK_UP); ntb_qp_link_work() 849 schedule_delayed_work(&qp->link_work, ntb_qp_link_work() 856 struct ntb_transport_qp *qp; ntb_transport_init_queue() local 864 qp = &nt->qps[qp_num]; ntb_transport_init_queue() 865 qp->qp_num = qp_num; ntb_transport_init_queue() 866 qp->transport = nt; ntb_transport_init_queue() 867 qp->ndev = nt->ndev; ntb_transport_init_queue() 868 qp->qp_link = NTB_LINK_DOWN; ntb_transport_init_queue() 869 qp->client_ready = NTB_LINK_DOWN; ntb_transport_init_queue() 870 qp->event_handler = NULL; ntb_transport_init_queue() 877 tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw; ntb_transport_init_queue() 879 qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset; ntb_transport_init_queue() 880 if (!qp->tx_mw) ntb_transport_init_queue() 883 qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset; ntb_transport_init_queue() 884 if (!qp->tx_mw_phys) ntb_transport_init_queue() 888 qp->rx_info = qp->tx_mw + tx_size; ntb_transport_init_queue() 891 qp->tx_max_frame = min(transport_mtu, tx_size / 2); ntb_transport_init_queue() 892 qp->tx_max_entry = tx_size / qp->tx_max_frame; ntb_transport_init_queue() 897 snprintf(debugfs_name, 4, "qp%d", qp_num); ntb_transport_init_queue() 898 qp->debugfs_dir = debugfs_create_dir(debugfs_name, ntb_transport_init_queue() 901 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, ntb_transport_init_queue() 902 qp->debugfs_dir, qp, ntb_transport_init_queue() 906 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); ntb_transport_init_queue() 907 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); ntb_transport_init_queue() 909 spin_lock_init(&qp->ntb_rx_pend_q_lock); ntb_transport_init_queue() 910 spin_lock_init(&qp->ntb_rx_free_q_lock); ntb_transport_init_queue() 911 spin_lock_init(&qp->ntb_tx_free_q_lock); ntb_transport_init_queue() 913 INIT_LIST_HEAD(&qp->rx_pend_q); ntb_transport_init_queue() 914 INIT_LIST_HEAD(&qp->rx_free_q); ntb_transport_init_queue() 915 INIT_LIST_HEAD(&qp->tx_free_q); ntb_transport_init_queue() 1001 /* verify that all the qp's are freed */ ntb_transport_free() 1026 struct ntb_transport_qp *qp = entry->qp; ntb_rx_copy_callback() local 1035 iowrite32(entry->index, &qp->rx_info->entry); ntb_rx_copy_callback() 1037 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); ntb_rx_copy_callback() 1039 if (qp->rx_handler && qp->client_ready == NTB_LINK_UP) ntb_rx_copy_callback() 1040 qp->rx_handler(qp, qp->cb_data, cb_data, len); ntb_rx_copy_callback() 1057 struct ntb_transport_qp *qp = entry->qp; ntb_async_rx() local 1058 struct dma_chan *chan = qp->dma_chan; ntb_async_rx() 1115 qp->last_cookie = cookie; ntb_async_rx() 1117 qp->rx_async++; ntb_async_rx() 
1130 dma_sync_wait(chan, qp->last_cookie); ntb_async_rx() 1133 qp->rx_memcpy++; ntb_async_rx() 1136 static int ntb_process_rxc(struct ntb_transport_qp *qp) ntb_process_rxc() argument 1142 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; ntb_process_rxc() 1143 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); ntb_process_rxc() 1145 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); ntb_process_rxc() 1147 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, ntb_process_rxc() 1150 qp->rx_err_no_buf++; ntb_process_rxc() 1155 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, ntb_process_rxc() 1156 &qp->rx_pend_q); ntb_process_rxc() 1157 qp->rx_ring_empty++; ntb_process_rxc() 1161 if (hdr->ver != (u32) qp->rx_pkts) { ntb_process_rxc() 1162 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, ntb_process_rxc() 1163 "qp %d: version mismatch, expected %llu - got %u\n", ntb_process_rxc() 1164 qp->qp_num, qp->rx_pkts, hdr->ver); ntb_process_rxc() 1165 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, ntb_process_rxc() 1166 &qp->rx_pend_q); ntb_process_rxc() 1167 qp->rx_err_ver++; ntb_process_rxc() 1172 ntb_qp_link_down(qp); ntb_process_rxc() 1177 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, ntb_process_rxc() 1179 qp->rx_index, hdr->ver, hdr->len, entry->len); ntb_process_rxc() 1181 qp->rx_bytes += hdr->len; ntb_process_rxc() 1182 qp->rx_pkts++; ntb_process_rxc() 1185 qp->rx_err_oflow++; ntb_process_rxc() 1186 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, ntb_process_rxc() 1193 entry->index = qp->rx_index; ntb_process_rxc() 1199 qp->rx_index++; ntb_process_rxc() 1200 qp->rx_index %= qp->rx_max_entry; ntb_process_rxc() 1205 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); ntb_process_rxc() 1209 iowrite32(qp->rx_index, &qp->rx_info->entry); ntb_process_rxc() 1216 struct ntb_transport_qp *qp = data; ntb_transport_rxc_db() local 1219 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n", ntb_transport_rxc_db() 1225 for (i = 0; i < qp->rx_max_entry; i++) { ntb_transport_rxc_db() 1226 rc = ntb_process_rxc(qp); ntb_transport_rxc_db() 1231 if (qp->dma_chan) ntb_transport_rxc_db() 1232 dma_async_issue_pending(qp->dma_chan); ntb_transport_rxc_db() 1240 struct ntb_transport_qp *qp = entry->qp; ntb_tx_copy_callback() local 1247 ntb_ring_doorbell(qp->ndev, qp->qp_num); ntb_tx_copy_callback() 1254 qp->tx_bytes += entry->len; ntb_tx_copy_callback() 1256 if (qp->tx_handler) ntb_tx_copy_callback() 1257 qp->tx_handler(qp, qp->cb_data, entry->cb_data, ntb_tx_copy_callback() 1261 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); ntb_tx_copy_callback() 1271 static void ntb_async_tx(struct ntb_transport_qp *qp, ntb_async_tx() argument 1276 struct dma_chan *chan = qp->dma_chan; ntb_async_tx() 1286 offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; ntb_async_tx() 1287 hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header); ntb_async_tx() 1291 iowrite32((u32) qp->tx_pkts, &hdr->ver); ntb_async_tx() 1300 dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index; ntb_async_tx() 1335 qp->tx_async++; ntb_async_tx() 1344 qp->tx_memcpy++; ntb_async_tx() 1347 static int ntb_process_tx(struct ntb_transport_qp *qp, ntb_process_tx() argument 1350 dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n", ntb_process_tx() 1351 qp->tx_pkts, qp->tx_index, entry->len, entry->flags, ntb_process_tx() 1353 if (qp->tx_index == qp->remote_rx_info->entry) { ntb_process_tx() 1354 qp->tx_ring_full++; ntb_process_tx() 1358 if (entry->len > 
qp->tx_max_frame - sizeof(struct ntb_payload_header)) { ntb_process_tx() 1359 if (qp->tx_handler) ntb_process_tx() 1360 qp->tx_handler(qp->cb_data, qp, NULL, -EIO); ntb_process_tx() 1362 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_process_tx() 1363 &qp->tx_free_q); ntb_process_tx() 1367 ntb_async_tx(qp, entry); ntb_process_tx() 1369 qp->tx_index++; ntb_process_tx() 1370 qp->tx_index %= qp->tx_max_entry; ntb_process_tx() 1372 qp->tx_pkts++; ntb_process_tx() 1377 static void ntb_send_link_down(struct ntb_transport_qp *qp) ntb_send_link_down() argument 1379 struct pci_dev *pdev = ntb_query_pdev(qp->ndev); ntb_send_link_down() 1383 if (qp->qp_link == NTB_LINK_DOWN) ntb_send_link_down() 1386 qp->qp_link = NTB_LINK_DOWN; ntb_send_link_down() 1387 dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num); ntb_send_link_down() 1390 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); ntb_send_link_down() 1404 rc = ntb_process_tx(qp, entry); ntb_send_link_down() 1407 qp->qp_num); ntb_send_link_down() 1429 struct ntb_transport_qp *qp; ntb_transport_create_queue() local 1447 qp = &nt->qps[free_queue]; ntb_transport_create_queue() 1448 qp->cb_data = data; ntb_transport_create_queue() 1449 qp->rx_handler = handlers->rx_handler; ntb_transport_create_queue() 1450 qp->tx_handler = handlers->tx_handler; ntb_transport_create_queue() 1451 qp->event_handler = handlers->event_handler; ntb_transport_create_queue() 1454 qp->dma_chan = dma_find_channel(DMA_MEMCPY); ntb_transport_create_queue() 1455 if (!qp->dma_chan) { ntb_transport_create_queue() 1465 entry->qp = qp; ntb_transport_create_queue() 1466 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, ntb_transport_create_queue() 1467 &qp->rx_free_q); ntb_transport_create_queue() 1475 entry->qp = qp; ntb_transport_create_queue() 1476 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_transport_create_queue() 1477 &qp->tx_free_q); ntb_transport_create_queue() 1480 rc = ntb_register_db_callback(qp->ndev, free_queue, qp, ntb_transport_create_queue() 1485 dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); ntb_transport_create_queue() 1487 return qp; ntb_transport_create_queue() 1490 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) ntb_transport_create_queue() 1493 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) ntb_transport_create_queue() 1495 if (qp->dma_chan) ntb_transport_create_queue() 1505 * @qp: NTB queue to be freed 1509 void ntb_transport_free_queue(struct ntb_transport_qp *qp) ntb_transport_free_queue() argument 1514 if (!qp) ntb_transport_free_queue() 1517 pdev = ntb_query_pdev(qp->ndev); ntb_transport_free_queue() 1519 if (qp->dma_chan) { ntb_transport_free_queue() 1520 struct dma_chan *chan = qp->dma_chan; ntb_transport_free_queue() 1524 qp->dma_chan = NULL; ntb_transport_free_queue() 1529 dma_sync_wait(chan, qp->last_cookie); ntb_transport_free_queue() 1534 ntb_unregister_db_callback(qp->ndev, qp->qp_num); ntb_transport_free_queue() 1536 cancel_delayed_work_sync(&qp->link_work); ntb_transport_free_queue() 1538 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) ntb_transport_free_queue() 1541 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) { ntb_transport_free_queue() 1546 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) ntb_transport_free_queue() 1549 set_bit(qp->qp_num, &qp->transport->qp_bitmap); ntb_transport_free_queue() 1551 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); 
ntb_transport_free_queue() 1557 * @qp: NTB queue to be freed 1561 * shutdown of qp. 1565 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len) ntb_transport_rx_remove() argument 1570 if (!qp || qp->client_ready == NTB_LINK_UP) ntb_transport_rx_remove() 1573 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); ntb_transport_rx_remove() 1580 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); ntb_transport_rx_remove() 1588 * @qp: NTB transport layer queue the entry is to be enqueued on 1598 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, ntb_transport_rx_enqueue() argument 1603 if (!qp) ntb_transport_rx_enqueue() 1606 entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q); ntb_transport_rx_enqueue() 1614 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); ntb_transport_rx_enqueue() 1622 * @qp: NTB transport layer queue the entry is to be enqueued on 1629 * serialize access to the qp. 1633 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, ntb_transport_tx_enqueue() argument 1639 if (!qp || qp->qp_link != NTB_LINK_UP || !len) ntb_transport_tx_enqueue() 1642 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); ntb_transport_tx_enqueue() 1644 qp->tx_err_no_buf++; ntb_transport_tx_enqueue() 1653 rc = ntb_process_tx(qp, entry); ntb_transport_tx_enqueue() 1655 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, ntb_transport_tx_enqueue() 1656 &qp->tx_free_q); ntb_transport_tx_enqueue() 1664 * @qp: NTB transport layer queue to be enabled 1668 void ntb_transport_link_up(struct ntb_transport_qp *qp) ntb_transport_link_up() argument 1670 if (!qp) ntb_transport_link_up() 1673 qp->client_ready = NTB_LINK_UP; ntb_transport_link_up() 1675 if (qp->transport->transport_link == NTB_LINK_UP) ntb_transport_link_up() 1676 schedule_delayed_work(&qp->link_work, 0); ntb_transport_link_up() 1682 * @qp: NTB transport layer queue to be disabled 1688 void ntb_transport_link_down(struct ntb_transport_qp *qp) ntb_transport_link_down() argument 1693 if (!qp) ntb_transport_link_down() 1696 pdev = ntb_query_pdev(qp->ndev); ntb_transport_link_down() 1697 qp->client_ready = NTB_LINK_DOWN; ntb_transport_link_down() 1699 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val); ntb_transport_link_down() 1705 rc = ntb_write_remote_spad(qp->ndev, QP_LINKS, ntb_transport_link_down() 1706 val & ~(1 << qp->qp_num)); ntb_transport_link_down() 1709 val & ~(1 << qp->qp_num), QP_LINKS); ntb_transport_link_down() 1711 if (qp->qp_link == NTB_LINK_UP) ntb_transport_link_down() 1712 ntb_send_link_down(qp); ntb_transport_link_down() 1714 cancel_delayed_work_sync(&qp->link_work); ntb_transport_link_down() 1720 * @qp: NTB transport layer queue to be queried 1726 bool ntb_transport_link_query(struct ntb_transport_qp *qp) ntb_transport_link_query() argument 1728 if (!qp) ntb_transport_link_query() 1731 return qp->qp_link == NTB_LINK_UP; ntb_transport_link_query() 1736 * ntb_transport_qp_num - Query the qp number 1737 * @qp: NTB transport layer queue to be queried 1739 * Query qp number of the NTB transport queue 1741 * RETURNS: a zero based number specifying the qp number 1743 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) ntb_transport_qp_num() argument 1745 if (!qp) ntb_transport_qp_num() 1748 return qp->qp_num; ntb_transport_qp_num() 1753 * ntb_transport_max_size - Query the max payload size of a qp 1754 * @qp: NTB transport layer queue to be queried 1756 * Query the maximum payload size 
permissible on the given qp 1758 * RETURNS: the max payload size of a qp 1760 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) ntb_transport_max_size() argument 1764 if (!qp) ntb_transport_max_size() 1767 if (!qp->dma_chan) ntb_transport_max_size() 1768 return qp->tx_max_frame - sizeof(struct ntb_payload_header); ntb_transport_max_size() 1771 max = qp->tx_max_frame - sizeof(struct ntb_payload_header); ntb_transport_max_size() 1772 max -= max % (1 << qp->dma_chan->device->copy_align); ntb_transport_max_size()
|
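The exported entry points above (ntb_transport_create_queue, rx_enqueue, tx_enqueue, link_up/link_down, max_size) are the whole client-facing API; ntb_netdev is the in-tree consumer. A hypothetical minimal client follows. Every my_*/MY_* name is invented for illustration; the handler signatures copy the rx_handler/tx_handler function-pointer members at lines 109 and 119, and the cb cookie passed to the enqueue calls is what the handler later receives as its data argument.

    #define MY_RX_BUFS 32
    #define MY_BUF_SZ  2048

    static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
                      void *data, int len)
    {
        /* consume the buffer we posted below, then repost it */
        ntb_transport_rx_enqueue(qp, data, data, MY_BUF_SZ);
    }

    static void my_tx(struct ntb_transport_qp *qp, void *qp_data,
                      void *data, int len)
    {
        kfree(data);   /* payload has been copied/DMA'd to the peer's MW */
    }

    static const struct ntb_queue_handlers my_handlers = {
        .rx_handler = my_rx,
        .tx_handler = my_tx,
        .event_handler = NULL,
    };

    static struct ntb_transport_qp *my_open(struct pci_dev *pdev, void *ctx)
    {
        struct ntb_transport_qp *qp;
        int i;

        qp = ntb_transport_create_queue(ctx, pdev, &my_handlers);
        if (!qp)
            return NULL;

        /* post receive buffers before declaring the client ready */
        for (i = 0; i < MY_RX_BUFS; i++) {
            void *buf = kmalloc(MY_BUF_SZ, GFP_KERNEL);

            if (buf)
                ntb_transport_rx_enqueue(qp, buf, buf, MY_BUF_SZ);
        }
        ntb_transport_link_up(qp);
        return qp;
    }

    /* transmit: hand ownership of a kmalloc'ed buffer to the transport */
    static int my_send(struct ntb_transport_qp *qp, void *buf, int len)
    {
        return ntb_transport_tx_enqueue(qp, buf, buf, len);
    }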
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
H A D | mthca_qp.c | 195 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) is_sqp() argument 197 return qp->qpn >= dev->qp_table.sqp_start && is_sqp() 198 qp->qpn <= dev->qp_table.sqp_start + 3; is_sqp() 201 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) is_qp0() argument 203 return qp->qpn >= dev->qp_table.sqp_start && is_qp0() 204 qp->qpn <= dev->qp_table.sqp_start + 1; is_qp0() 207 static void *get_recv_wqe(struct mthca_qp *qp, int n) get_recv_wqe() argument 209 if (qp->is_direct) get_recv_wqe() 210 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); get_recv_wqe() 212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + get_recv_wqe() 213 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); get_recv_wqe() 216 static void *get_send_wqe(struct mthca_qp *qp, int n) get_send_wqe() argument 218 if (qp->is_direct) get_send_wqe() 219 return qp->queue.direct.buf + qp->send_wqe_offset + get_send_wqe() 220 (n << qp->sq.wqe_shift); get_send_wqe() 222 return qp->queue.page_list[(qp->send_wqe_offset + get_send_wqe() 223 (n << qp->sq.wqe_shift)) >> get_send_wqe() 225 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & get_send_wqe() 240 struct mthca_qp *qp; mthca_qp_event() local 244 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); mthca_qp_event() 245 if (qp) mthca_qp_event() 246 ++qp->refcount; mthca_qp_event() 249 if (!qp) { mthca_qp_event() 256 qp->port = qp->alt_port; mthca_qp_event() 260 event.element.qp = &qp->ibqp; mthca_qp_event() 261 if (qp->ibqp.event_handler) mthca_qp_event() 262 qp->ibqp.event_handler(&event, qp->ibqp.qp_context); mthca_qp_event() 265 if (!--qp->refcount) mthca_qp_event() 266 wake_up(&qp->wait); mthca_qp_event() 327 static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, get_hw_access_flags() argument 337 dest_rd_atomic = qp->resp_depth; get_hw_access_flags() 342 access_flags = qp->atomic_rd_en; get_hw_access_flags() 428 struct mthca_qp *qp = to_mqp(ibqp); mthca_query_qp() local 435 mutex_lock(&qp->mutex); mthca_query_qp() 437 if (qp->state == IB_QPS_RESET) { mthca_query_qp() 448 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); mthca_query_qp() 458 qp->state = to_ib_qp_state(mthca_state); mthca_query_qp() 459 qp_attr->qp_state = qp->state; mthca_query_qp() 470 if (qp->transport == RC || qp->transport == UC) { mthca_query_qp() 482 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ mthca_query_qp() 498 qp_attr->cap.max_send_wr = qp->sq.max; mthca_query_qp() 499 qp_attr->cap.max_recv_wr = qp->rq.max; mthca_query_qp() 500 qp_attr->cap.max_send_sge = qp->sq.max_gs; mthca_query_qp() 501 qp_attr->cap.max_recv_sge = qp->rq.max_gs; mthca_query_qp() 502 qp_attr->cap.max_inline_data = qp->max_inline_data; mthca_query_qp() 505 qp_init_attr->sq_sig_type = qp->sq_policy; mthca_query_qp() 511 mutex_unlock(&qp->mutex); mthca_query_qp() 548 struct mthca_qp *qp = to_mqp(ibqp); __mthca_modify_qp() local 565 (to_mthca_st(qp->transport) << 16)); __mthca_modify_qp() 586 if (qp->transport == MLX || qp->transport == UD) __mthca_modify_qp() 598 if (qp->rq.max) __mthca_modify_qp() 599 qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; __mthca_modify_qp() 600 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; __mthca_modify_qp() 602 if (qp->sq.max) __mthca_modify_qp() 603 qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; __mthca_modify_qp() 604 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; __mthca_modify_qp() 609 if (qp->ibqp.uobject) __mthca_modify_qp() 611 
cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index); __mthca_modify_qp() 614 qp_context->local_qpn = cpu_to_be32(qp->qpn); __mthca_modify_qp() 619 if (qp->transport == MLX) __mthca_modify_qp() 621 cpu_to_be32(qp->port << 24); __mthca_modify_qp() 645 attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) __mthca_modify_qp() 695 qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); __mthca_modify_qp() 699 if (qp->sq_policy == IB_SIGNAL_ALL_WR) __mthca_modify_qp() 722 qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); __mthca_modify_qp() 723 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); __mthca_modify_qp() 735 qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask); __mthca_modify_qp() 755 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << __mthca_modify_qp() 761 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); __mthca_modify_qp() 777 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, __mthca_modify_qp() 785 qp->state = new_state; __mthca_modify_qp() 787 qp->atomic_rd_en = attr->qp_access_flags; __mthca_modify_qp() 789 qp->resp_depth = attr->max_dest_rd_atomic; __mthca_modify_qp() 791 qp->port = attr->port_num; __mthca_modify_qp() 793 qp->alt_port = attr->alt_port_num; __mthca_modify_qp() 795 if (is_sqp(dev, qp)) __mthca_modify_qp() 796 store_attrs(to_msqp(qp), attr, attr_mask); __mthca_modify_qp() 802 if (is_qp0(dev, qp)) { __mthca_modify_qp() 805 init_port(dev, qp->port); __mthca_modify_qp() 811 mthca_CLOSE_IB(dev, qp->port); __mthca_modify_qp() 818 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { __mthca_modify_qp() 819 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, __mthca_modify_qp() 820 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); __mthca_modify_qp() 821 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) __mthca_modify_qp() 822 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); __mthca_modify_qp() 824 mthca_wq_reset(&qp->sq); __mthca_modify_qp() 825 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); __mthca_modify_qp() 827 mthca_wq_reset(&qp->rq); __mthca_modify_qp() 828 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); __mthca_modify_qp() 831 *qp->sq.db = 0; __mthca_modify_qp() 832 *qp->rq.db = 0; __mthca_modify_qp() 846 struct mthca_qp *qp = to_mqp(ibqp); mthca_modify_qp() local 850 mutex_lock(&qp->mutex); mthca_modify_qp() 854 spin_lock_irq(&qp->sq.lock); mthca_modify_qp() 855 spin_lock(&qp->rq.lock); mthca_modify_qp() 856 cur_state = qp->state; mthca_modify_qp() 857 spin_unlock(&qp->rq.lock); mthca_modify_qp() 858 spin_unlock_irq(&qp->sq.lock); mthca_modify_qp() 867 qp->transport, cur_state, new_state, mthca_modify_qp() 907 mutex_unlock(&qp->mutex); mthca_modify_qp() 911 static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) mthca_max_data_size() argument 919 switch (qp->transport) { mthca_max_data_size() 947 struct mthca_qp *qp) mthca_adjust_qp_caps() 949 int max_data_size = mthca_max_data_size(dev, qp, mthca_adjust_qp_caps() 951 1 << qp->sq.wqe_shift)); mthca_adjust_qp_caps() 953 qp->max_inline_data = mthca_max_inline_data(pd, max_data_size); mthca_adjust_qp_caps() 955 qp->sq.max_gs = min_t(int, dev->limits.max_sg, mthca_adjust_qp_caps() 957 qp->rq.max_gs = min_t(int, dev->limits.max_sg, mthca_adjust_qp_caps() 958 (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - mthca_adjust_qp_caps() 964 * Allocate and register buffer for WQEs. 
qp->rq.max, sq.max, 972 struct mthca_qp *qp) mthca_alloc_wqe_buf() 978 qp->rq.max_gs * sizeof (struct mthca_data_seg); mthca_alloc_wqe_buf() 983 for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; mthca_alloc_wqe_buf() 984 qp->rq.wqe_shift++) mthca_alloc_wqe_buf() 987 size = qp->sq.max_gs * sizeof (struct mthca_data_seg); mthca_alloc_wqe_buf() 988 switch (qp->transport) { mthca_alloc_wqe_buf() 1027 for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; mthca_alloc_wqe_buf() 1028 qp->sq.wqe_shift++) mthca_alloc_wqe_buf() 1031 qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, mthca_alloc_wqe_buf() 1032 1 << qp->sq.wqe_shift); mthca_alloc_wqe_buf() 1042 size = PAGE_ALIGN(qp->send_wqe_offset + mthca_alloc_wqe_buf() 1043 (qp->sq.max << qp->sq.wqe_shift)); mthca_alloc_wqe_buf() 1045 qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64), mthca_alloc_wqe_buf() 1047 if (!qp->wrid) mthca_alloc_wqe_buf() 1051 &qp->queue, &qp->is_direct, pd, 0, &qp->mr); mthca_alloc_wqe_buf() 1058 kfree(qp->wrid); mthca_alloc_wqe_buf() 1063 struct mthca_qp *qp) mthca_free_wqe_buf() 1065 mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + mthca_free_wqe_buf() 1066 (qp->sq.max << qp->sq.wqe_shift)), mthca_free_wqe_buf() 1067 &qp->queue, qp->is_direct, &qp->mr); mthca_free_wqe_buf() 1068 kfree(qp->wrid); mthca_free_wqe_buf() 1072 struct mthca_qp *qp) mthca_map_memfree() 1077 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); mthca_map_memfree() 1081 ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); mthca_map_memfree() 1086 qp->qpn << dev->qp_table.rdb_shift); mthca_map_memfree() 1095 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); mthca_map_memfree() 1098 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); mthca_map_memfree() 1104 struct mthca_qp *qp) mthca_unmap_memfree() 1107 qp->qpn << dev->qp_table.rdb_shift); mthca_unmap_memfree() 1108 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); mthca_unmap_memfree() 1109 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); mthca_unmap_memfree() 1113 struct mthca_qp *qp) mthca_alloc_memfree() 1116 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, mthca_alloc_memfree() 1117 qp->qpn, &qp->rq.db); mthca_alloc_memfree() 1118 if (qp->rq.db_index < 0) mthca_alloc_memfree() 1121 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, mthca_alloc_memfree() 1122 qp->qpn, &qp->sq.db); mthca_alloc_memfree() 1123 if (qp->sq.db_index < 0) { mthca_alloc_memfree() 1124 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); mthca_alloc_memfree() 1133 struct mthca_qp *qp) mthca_free_memfree() 1136 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); mthca_free_memfree() 1137 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); mthca_free_memfree() 1146 struct mthca_qp *qp) mthca_alloc_qp_common() 1152 qp->refcount = 1; mthca_alloc_qp_common() 1153 init_waitqueue_head(&qp->wait); mthca_alloc_qp_common() 1154 mutex_init(&qp->mutex); mthca_alloc_qp_common() 1155 qp->state = IB_QPS_RESET; mthca_alloc_qp_common() 1156 qp->atomic_rd_en = 0; mthca_alloc_qp_common() 1157 qp->resp_depth = 0; mthca_alloc_qp_common() 1158 qp->sq_policy = send_policy; mthca_alloc_qp_common() 1159 mthca_wq_reset(&qp->sq); mthca_alloc_qp_common() 1160 mthca_wq_reset(&qp->rq); mthca_alloc_qp_common() 1162 spin_lock_init(&qp->sq.lock); mthca_alloc_qp_common() 1163 spin_lock_init(&qp->rq.lock); mthca_alloc_qp_common() 1165 ret = mthca_map_memfree(dev, qp); mthca_alloc_qp_common() 1169 ret = mthca_alloc_wqe_buf(dev, pd, qp); mthca_alloc_qp_common() 1171 
mthca_unmap_memfree(dev, qp); mthca_alloc_qp_common() 1175 mthca_adjust_qp_caps(dev, pd, qp); mthca_alloc_qp_common() 1185 ret = mthca_alloc_memfree(dev, qp); mthca_alloc_qp_common() 1187 mthca_free_wqe_buf(dev, qp); mthca_alloc_qp_common() 1188 mthca_unmap_memfree(dev, qp); mthca_alloc_qp_common() 1195 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; mthca_alloc_qp_common() 1197 for (i = 0; i < qp->rq.max; ++i) { mthca_alloc_qp_common() 1198 next = get_recv_wqe(qp, i); mthca_alloc_qp_common() 1199 next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << mthca_alloc_qp_common() 1200 qp->rq.wqe_shift); mthca_alloc_qp_common() 1204 (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift); mthca_alloc_qp_common() 1209 for (i = 0; i < qp->sq.max; ++i) { mthca_alloc_qp_common() 1210 next = get_send_wqe(qp, i); mthca_alloc_qp_common() 1211 next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << mthca_alloc_qp_common() 1212 qp->sq.wqe_shift) + mthca_alloc_qp_common() 1213 qp->send_wqe_offset); mthca_alloc_qp_common() 1216 for (i = 0; i < qp->rq.max; ++i) { mthca_alloc_qp_common() 1217 next = get_recv_wqe(qp, i); mthca_alloc_qp_common() 1218 next->nda_op = htonl((((i + 1) % qp->rq.max) << mthca_alloc_qp_common() 1219 qp->rq.wqe_shift) | 1); mthca_alloc_qp_common() 1224 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); mthca_alloc_qp_common() 1225 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); mthca_alloc_qp_common() 1231 struct mthca_pd *pd, struct mthca_qp *qp) mthca_set_qp_size() 1233 int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz); mthca_set_qp_size() 1247 if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg) mthca_set_qp_size() 1251 qp->rq.max = cap->max_recv_wr ? mthca_set_qp_size() 1253 qp->sq.max = cap->max_send_wr ? 
mthca_set_qp_size() 1256 qp->rq.max = cap->max_recv_wr; mthca_set_qp_size() 1257 qp->sq.max = cap->max_send_wr; mthca_set_qp_size() 1260 qp->rq.max_gs = cap->max_recv_sge; mthca_set_qp_size() 1261 qp->sq.max_gs = max_t(int, cap->max_send_sge, mthca_set_qp_size() 1276 struct mthca_qp *qp) mthca_alloc_qp() 1281 case IB_QPT_RC: qp->transport = RC; break; mthca_alloc_qp() 1282 case IB_QPT_UC: qp->transport = UC; break; mthca_alloc_qp() 1283 case IB_QPT_UD: qp->transport = UD; break; mthca_alloc_qp() 1287 err = mthca_set_qp_size(dev, cap, pd, qp); mthca_alloc_qp() 1291 qp->qpn = mthca_alloc(&dev->qp_table.alloc); mthca_alloc_qp() 1292 if (qp->qpn == -1) mthca_alloc_qp() 1296 qp->port = 0; mthca_alloc_qp() 1299 send_policy, qp); mthca_alloc_qp() 1301 mthca_free(&dev->qp_table.alloc, qp->qpn); mthca_alloc_qp() 1306 mthca_array_set(&dev->qp_table.qp, mthca_alloc_qp() 1307 qp->qpn & (dev->limits.num_qps - 1), qp); mthca_alloc_qp() 1356 sqp->qp.transport = MLX; mthca_alloc_sqp() 1357 err = mthca_set_qp_size(dev, cap, pd, &sqp->qp); mthca_alloc_sqp() 1361 sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; mthca_alloc_sqp() 1368 if (mthca_array_get(&dev->qp_table.qp, mqpn)) mthca_alloc_sqp() 1371 mthca_array_set(&dev->qp_table.qp, mqpn, sqp); mthca_alloc_sqp() 1377 sqp->qp.port = port; mthca_alloc_sqp() 1378 sqp->qp.qpn = mqpn; mthca_alloc_sqp() 1379 sqp->qp.transport = MLX; mthca_alloc_sqp() 1382 send_policy, &sqp->qp); mthca_alloc_sqp() 1398 mthca_array_clear(&dev->qp_table.qp, mqpn); mthca_alloc_sqp() 1410 static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) get_qp_refcount() argument 1415 c = qp->refcount; get_qp_refcount() 1422 struct mthca_qp *qp) mthca_free_qp() 1427 send_cq = to_mcq(qp->ibqp.send_cq); mthca_free_qp() 1428 recv_cq = to_mcq(qp->ibqp.recv_cq); mthca_free_qp() 1437 mthca_array_clear(&dev->qp_table.qp, mthca_free_qp() 1438 qp->qpn & (dev->limits.num_qps - 1)); mthca_free_qp() 1439 --qp->refcount; mthca_free_qp() 1444 wait_event(qp->wait, !get_qp_refcount(dev, qp)); mthca_free_qp() 1446 if (qp->state != IB_QPS_RESET) mthca_free_qp() 1447 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, mthca_free_qp() 1455 if (!qp->ibqp.uobject) { mthca_free_qp() 1456 mthca_cq_clean(dev, recv_cq, qp->qpn, mthca_free_qp() 1457 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); mthca_free_qp() 1459 mthca_cq_clean(dev, send_cq, qp->qpn, NULL); mthca_free_qp() 1461 mthca_free_memfree(dev, qp); mthca_free_qp() 1462 mthca_free_wqe_buf(dev, qp); mthca_free_qp() 1465 mthca_unmap_memfree(dev, qp); mthca_free_qp() 1467 if (is_sqp(dev, qp)) { mthca_free_qp() 1468 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); mthca_free_qp() 1470 to_msqp(qp)->header_buf_size, mthca_free_qp() 1471 to_msqp(qp)->header_buf, mthca_free_qp() 1472 to_msqp(qp)->header_dma); mthca_free_qp() 1474 mthca_free(&dev->qp_table.alloc, qp->qpn); mthca_free_qp() 1495 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | build_mlx_header() 1516 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 
15 : 0; build_mlx_header() 1520 if (!sqp->qp.ibqp.qp_num) build_mlx_header() 1521 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, build_mlx_header() 1524 ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, build_mlx_header() 1531 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); build_mlx_header() 1538 data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); build_mlx_header() 1606 struct mthca_qp *qp = to_mqp(ibqp); mthca_tavor_post_send() local 1626 spin_lock_irqsave(&qp->sq.lock, flags); mthca_tavor_post_send() 1630 ind = qp->sq.next_ind; mthca_tavor_post_send() 1633 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_tavor_post_send() 1635 " %d max, %d nreq)\n", qp->qpn, mthca_tavor_post_send() 1636 qp->sq.head, qp->sq.tail, mthca_tavor_post_send() 1637 qp->sq.max, nreq); mthca_tavor_post_send() 1643 wqe = get_send_wqe(qp, ind); mthca_tavor_post_send() 1644 prev_wqe = qp->sq.last; mthca_tavor_post_send() 1645 qp->sq.last = wqe; mthca_tavor_post_send() 1662 switch (qp->transport) { mthca_tavor_post_send() 1717 err = build_mlx_header(dev, to_msqp(qp), ind, wr, mthca_tavor_post_send() 1729 if (wr->num_sge > qp->sq.max_gs) { mthca_tavor_post_send() 1743 if (qp->transport == MLX) { mthca_tavor_post_send() 1751 qp->wrid[ind + qp->rq.max] = wr->wr_id; mthca_tavor_post_send() 1761 cpu_to_be32(((ind << qp->sq.wqe_shift) + mthca_tavor_post_send() 1762 qp->send_wqe_offset) | mthca_tavor_post_send() 1778 if (unlikely(ind >= qp->sq.max)) mthca_tavor_post_send() 1779 ind -= qp->sq.max; mthca_tavor_post_send() 1786 mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) + mthca_tavor_post_send() 1787 qp->send_wqe_offset) | f0 | op0, mthca_tavor_post_send() 1788 (qp->qpn << 8) | size0, mthca_tavor_post_send() 1798 qp->sq.next_ind = ind; mthca_tavor_post_send() 1799 qp->sq.head += nreq; mthca_tavor_post_send() 1801 spin_unlock_irqrestore(&qp->sq.lock, flags); mthca_tavor_post_send() 1809 struct mthca_qp *qp = to_mqp(ibqp); mthca_tavor_post_receive() local 1827 spin_lock_irqsave(&qp->rq.lock, flags); mthca_tavor_post_receive() 1831 ind = qp->rq.next_ind; mthca_tavor_post_receive() 1834 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_tavor_post_receive() 1836 " %d max, %d nreq)\n", qp->qpn, mthca_tavor_post_receive() 1837 qp->rq.head, qp->rq.tail, mthca_tavor_post_receive() 1838 qp->rq.max, nreq); mthca_tavor_post_receive() 1844 wqe = get_recv_wqe(qp, ind); mthca_tavor_post_receive() 1845 prev_wqe = qp->rq.last; mthca_tavor_post_receive() 1846 qp->rq.last = wqe; mthca_tavor_post_receive() 1855 if (unlikely(wr->num_sge > qp->rq.max_gs)) { mthca_tavor_post_receive() 1867 qp->wrid[ind] = wr->wr_id; mthca_tavor_post_receive() 1876 if (unlikely(ind >= qp->rq.max)) mthca_tavor_post_receive() 1877 ind -= qp->rq.max; mthca_tavor_post_receive() 1885 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, mthca_tavor_post_receive() 1886 qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL, mthca_tavor_post_receive() 1889 qp->rq.next_ind = ind; mthca_tavor_post_receive() 1890 qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; mthca_tavor_post_receive() 1898 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, mthca_tavor_post_receive() 1899 qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL, mthca_tavor_post_receive() 1903 qp->rq.next_ind = ind; mthca_tavor_post_receive() 1904 qp->rq.head += nreq; mthca_tavor_post_receive() 1912 spin_unlock_irqrestore(&qp->rq.lock, flags); mthca_tavor_post_receive() 1920 struct mthca_qp *qp = to_mqp(ibqp); 
mthca_arbel_post_send() local 1941 spin_lock_irqsave(&qp->sq.lock, flags); mthca_arbel_post_send() 1945 ind = qp->sq.head & (qp->sq.max - 1); mthca_arbel_post_send() 1952 ((qp->sq.head & 0xffff) << 8) | f0 | op0; mthca_arbel_post_send() 1954 qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; mthca_arbel_post_send() 1961 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); mthca_arbel_post_send() 1969 mthca_write64(dbhi, (qp->qpn << 8) | size0, mthca_arbel_post_send() 1974 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { mthca_arbel_post_send() 1976 " %d max, %d nreq)\n", qp->qpn, mthca_arbel_post_send() 1977 qp->sq.head, qp->sq.tail, mthca_arbel_post_send() 1978 qp->sq.max, nreq); mthca_arbel_post_send() 1984 wqe = get_send_wqe(qp, ind); mthca_arbel_post_send() 1985 prev_wqe = qp->sq.last; mthca_arbel_post_send() 1986 qp->sq.last = wqe; mthca_arbel_post_send() 2003 switch (qp->transport) { mthca_arbel_post_send() 2058 err = build_mlx_header(dev, to_msqp(qp), ind, wr, mthca_arbel_post_send() 2070 if (wr->num_sge > qp->sq.max_gs) { mthca_arbel_post_send() 2084 if (qp->transport == MLX) { mthca_arbel_post_send() 2092 qp->wrid[ind + qp->rq.max] = wr->wr_id; mthca_arbel_post_send() 2102 cpu_to_be32(((ind << qp->sq.wqe_shift) + mthca_arbel_post_send() 2103 qp->send_wqe_offset) | mthca_arbel_post_send() 2119 if (unlikely(ind >= qp->sq.max)) mthca_arbel_post_send() 2120 ind -= qp->sq.max; mthca_arbel_post_send() 2125 dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; mthca_arbel_post_send() 2127 qp->sq.head += nreq; mthca_arbel_post_send() 2134 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); mthca_arbel_post_send() 2142 mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, mthca_arbel_post_send() 2152 spin_unlock_irqrestore(&qp->sq.lock, flags); mthca_arbel_post_send() 2160 struct mthca_qp *qp = to_mqp(ibqp); mthca_arbel_post_receive() local 2168 spin_lock_irqsave(&qp->rq.lock, flags); mthca_arbel_post_receive() 2172 ind = qp->rq.head & (qp->rq.max - 1); mthca_arbel_post_receive() 2175 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { mthca_arbel_post_receive() 2177 " %d max, %d nreq)\n", qp->qpn, mthca_arbel_post_receive() 2178 qp->rq.head, qp->rq.tail, mthca_arbel_post_receive() 2179 qp->rq.max, nreq); mthca_arbel_post_receive() 2185 wqe = get_recv_wqe(qp, ind); mthca_arbel_post_receive() 2191 if (unlikely(wr->num_sge > qp->rq.max_gs)) { mthca_arbel_post_receive() 2202 if (i < qp->rq.max_gs) mthca_arbel_post_receive() 2205 qp->wrid[ind] = wr->wr_id; mthca_arbel_post_receive() 2208 if (unlikely(ind >= qp->rq.max)) mthca_arbel_post_receive() 2209 ind -= qp->rq.max; mthca_arbel_post_receive() 2213 qp->rq.head += nreq; mthca_arbel_post_receive() 2220 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); mthca_arbel_post_receive() 2223 spin_unlock_irqrestore(&qp->rq.lock, flags); mthca_arbel_post_receive() 2227 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, mthca_free_err_wqe() argument 2236 if (qp->ibqp.srq && !is_send) { mthca_free_err_wqe() 2242 next = get_send_wqe(qp, index); mthca_free_err_wqe() 2244 next = get_recv_wqe(qp, index); mthca_free_err_wqe() 2274 err = mthca_array_init(&dev->qp_table.qp, mthca_init_qp_table() 2296 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); mthca_init_qp_table() 2309 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); mthca_cleanup_qp_table() 945 mthca_adjust_qp_caps(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) mthca_adjust_qp_caps() argument 970 
mthca_alloc_wqe_buf(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_qp *qp) mthca_alloc_wqe_buf() argument 1062 mthca_free_wqe_buf(struct mthca_dev *dev, struct mthca_qp *qp) mthca_free_wqe_buf() argument 1071 mthca_map_memfree(struct mthca_dev *dev, struct mthca_qp *qp) mthca_map_memfree() argument 1103 mthca_unmap_memfree(struct mthca_dev *dev, struct mthca_qp *qp) mthca_unmap_memfree() argument 1112 mthca_alloc_memfree(struct mthca_dev *dev, struct mthca_qp *qp) mthca_alloc_memfree() argument 1132 mthca_free_memfree(struct mthca_dev *dev, struct mthca_qp *qp) mthca_free_memfree() argument 1141 mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp) mthca_alloc_qp_common() argument 1230 mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, struct mthca_pd *pd, struct mthca_qp *qp) mthca_set_qp_size() argument 1269 mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp) mthca_alloc_qp() argument 1421 mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp) mthca_free_qp() argument
|
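get_send_wqe()/get_recv_wqe() at lines 207-225 boil down to one piece of address arithmetic: WQE n lives at byte offset (n << wqe_shift) from the queue start, the send queue additionally displaced by send_wqe_offset, and an indirectly mapped queue splits the offset into a page index plus an intra-page remainder. The same math folded into a single helper, as a restatement rather than the driver's code:

    static void *wqe_addr(struct mthca_qp *qp, int n, bool is_send)
    {
        unsigned long off = is_send ?
            qp->send_wqe_offset + ((unsigned long)n << qp->sq.wqe_shift) :
            (unsigned long)n << qp->rq.wqe_shift;

        if (qp->is_direct)               /* one contiguous buffer */
            return qp->queue.direct.buf + off;

        /* page-built buffer: pick the page, then offset within it */
        return qp->queue.page_list[off >> PAGE_SHIFT].buf +
               (off & (PAGE_SIZE - 1));
    }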
H A D | mthca_provider.c | 515 struct mthca_qp *qp; mthca_create_qp() local 528 qp = kmalloc(sizeof *qp, GFP_KERNEL); mthca_create_qp() 529 if (!qp) mthca_create_qp() 536 kfree(qp); mthca_create_qp() 544 kfree(qp); mthca_create_qp() 556 kfree(qp); mthca_create_qp() 560 qp->mr.ibmr.lkey = ucmd.lkey; mthca_create_qp() 561 qp->sq.db_index = ucmd.sq_db_index; mthca_create_qp() 562 qp->rq.db_index = ucmd.rq_db_index; mthca_create_qp() 569 &init_attr->cap, qp); mthca_create_qp() 584 qp->ibqp.qp_num = qp->qpn; mthca_create_qp() 594 qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); mthca_create_qp() 595 if (!qp) mthca_create_qp() 598 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; mthca_create_qp() 604 qp->ibqp.qp_num, init_attr->port_num, mthca_create_qp() 605 to_msqp(qp)); mthca_create_qp() 614 kfree(qp); mthca_create_qp() 618 init_attr->cap.max_send_wr = qp->sq.max; mthca_create_qp() 619 init_attr->cap.max_recv_wr = qp->rq.max; mthca_create_qp() 620 init_attr->cap.max_send_sge = qp->sq.max_gs; mthca_create_qp() 621 init_attr->cap.max_recv_sge = qp->rq.max_gs; mthca_create_qp() 622 init_attr->cap.max_inline_data = qp->max_inline_data; mthca_create_qp() 624 return &qp->ibqp; mthca_create_qp() 627 static int mthca_destroy_qp(struct ib_qp *qp) mthca_destroy_qp() argument 629 if (qp->uobject) { mthca_destroy_qp() 630 mthca_unmap_user_db(to_mdev(qp->device), mthca_destroy_qp() 631 &to_mucontext(qp->uobject->context)->uar, mthca_destroy_qp() 632 to_mucontext(qp->uobject->context)->db_tab, mthca_destroy_qp() 633 to_mqp(qp)->sq.db_index); mthca_destroy_qp() 634 mthca_unmap_user_db(to_mdev(qp->device), mthca_destroy_qp() 635 &to_mucontext(qp->uobject->context)->uar, mthca_destroy_qp() 636 to_mucontext(qp->uobject->context)->db_tab, mthca_destroy_qp() 637 to_mqp(qp)->rq.db_index); mthca_destroy_qp() 639 mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); mthca_destroy_qp() 640 kfree(qp); mthca_destroy_qp()
|
H A D | mthca_mcg.c | 43 __be32 qp[MTHCA_QP_PER_MGM]; member in struct:mthca_mgm 165 if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) { mthca_multicast_attach() 170 } else if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) { mthca_multicast_attach() 171 mgm->qp[i] = cpu_to_be32(ibqp->qp_num | (1 << 31)); mthca_multicast_attach() 242 if (mgm->qp[i] == cpu_to_be32(ibqp->qp_num | (1 << 31))) mthca_multicast_detach() 244 if (!(mgm->qp[i] & cpu_to_be32(1 << 31))) mthca_multicast_detach() 254 mgm->qp[loc] = mgm->qp[i - 1]; mthca_multicast_detach() 255 mgm->qp[i - 1] = 0; mthca_multicast_detach()
|
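The mgm->qp[] member array above encodes each multicast member as its QPN with bit 31 doubling as a "slot in use" flag, which is why attach compares slots against cpu_to_be32(qp_num | (1 << 31)) and why detach compacts the array by moving the last live entry (mgm->qp[i - 1]) into the vacated slot. The encoding as helpers, with names invented here:

    /* one MGM slot: big-endian (QPN | valid bit) */
    static inline __be32 mgm_encode(u32 qpn)
    {
        return cpu_to_be32(qpn | (1U << 31));
    }

    static inline bool mgm_slot_used(__be32 slot)
    {
        return !!(slot & cpu_to_be32(1U << 31));
    }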
H A D | mthca_provider.h | 139 * struct mthca_cq/qp also has its own lock. An individual qp lock 141 * a qp may be locked, with the cq with the lower cqn locked first. 144 * Each struct mthca_cq/qp also has a ref count, protected by the 151 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the 179 * - split cq/qp table lock into n separate (cache-aligned) locks, 289 struct mthca_qp qp; member in struct:mthca_sqp 339 static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp) to_msqp() argument 341 return container_of(qp, struct mthca_sqp, qp); to_msqp()
|
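to_msqp() at line 339 is the standard container_of() downcast: struct mthca_sqp embeds a plain struct mthca_qp as its member qp, and only QPNs inside the special-QP window may be downcast, hence the is_sqp() guard seen in mthca_free_qp() above. Typical guarded use, sketched; the dma_free_coherent() call mirrors the free path in mthca_qp.c rather than quoting it.

    static void sqp_cleanup_sketch(struct mthca_dev *dev, struct mthca_qp *qp)
    {
        struct mthca_sqp *sqp;

        if (!is_sqp(dev, qp))
            return;                 /* ordinary QP: no enclosing sqp */

        sqp = to_msqp(qp);          /* container_of() downcast */
        dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
                          sqp->header_buf, sqp->header_dma);
    }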
H A D | mthca_eq.c | 144 } __attribute__((packed)) qp; member in union:mthca_eqe::__anon4926 282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int() 287 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int() 292 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int() 297 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int() 307 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int() 312 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int() 317 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int() 322 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, mthca_eq_int()
|
H A D | mthca_dev.h | 259 struct mthca_array qp; member in struct:mthca_qp_table 541 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, 550 struct mthca_qp *qp); 560 void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
|
H A D | mthca_cq.c | 378 struct mthca_qp *qp, int wqe_index, int is_send, handle_error_cqe() 466 mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe); handle_error_cqe() 525 *cur_qp = mthca_array_get(&dev->qp_table.qp, mthca_poll_one() 536 entry->qp = &(*cur_qp)->ibqp; mthca_poll_one() 667 struct mthca_qp *qp = NULL; mthca_poll_cq() local 678 err = mthca_poll_one(dev, cq, &qp, mthca_poll_cq() 377 handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, struct mthca_qp *qp, int wqe_index, int is_send, struct mthca_err_cqe *cqe, struct ib_wc *entry, int *free_cqe) handle_error_cqe() argument
|
H A D | mthca_mad.c | 89 new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd, update_sm_ah()
|
/linux-4.1.27/drivers/infiniband/hw/cxgb4/ |
H A D | Makefile | 5 iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
|
H A D | device.c | 230 struct c4iw_qp *qp = p; dump_qp() local 235 if (id != qp->wq.sq.qid) dump_qp() 242 if (qp->ep) { dump_qp() 243 if (qp->ep->com.local_addr.ss_family == AF_INET) { dump_qp() 245 &qp->ep->com.local_addr; dump_qp() 247 &qp->ep->com.remote_addr; dump_qp() 249 &qp->ep->com.mapped_local_addr; dump_qp() 251 &qp->ep->com.mapped_remote_addr; dump_qp() 254 "rc qp sq id %u rq id %u state %u " dump_qp() 257 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp() 258 (int)qp->attr.state, dump_qp() 259 qp->wq.sq.flags & T4_SQ_ONCHIP, dump_qp() 260 qp->ep->hwtid, (int)qp->ep->com.state, dump_qp() 267 &qp->ep->com.local_addr; dump_qp() 269 &qp->ep->com.remote_addr; dump_qp() 272 &qp->ep->com.mapped_local_addr; dump_qp() 275 &qp->ep->com.mapped_remote_addr; dump_qp() 278 "rc qp sq id %u rq id %u state %u " dump_qp() 281 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp() 282 (int)qp->attr.state, dump_qp() 283 qp->wq.sq.flags & T4_SQ_ONCHIP, dump_qp() 284 qp->ep->hwtid, (int)qp->ep->com.state, dump_qp() 294 "qp sq id %u rq id %u state %u onchip %u\n", dump_qp() 295 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp() 296 (int)qp->attr.state, dump_qp() 297 qp->wq.sq.flags & T4_SQ_ONCHIP); dump_qp() 563 "ep %p cm_id %p qp %p state %d flags 0x%lx " dump_ep() 567 ep, ep->com.cm_id, ep->com.qp, dump_ep() 587 "ep %p cm_id %p qp %p state %d flags 0x%lx " dump_ep() 591 ep, ep->com.cm_id, ep->com.qp, dump_ep() 787 if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start || c4iw_rdev_open() 788 rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) { c4iw_rdev_open() 789 pr_err(MOD "%s: unsupported qp and cq id ranges " c4iw_rdev_open() 790 "qp start %u size %u cq start %u size %u\n", c4iw_rdev_open() 791 pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start, c4iw_rdev_open() 792 rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size, c4iw_rdev_open() 800 * to get the correct address of the doorbell for that qp. 
c4iw_rdev_open() 808 "qp qid start %u size %u cq qid start %u size %u\n", c4iw_rdev_open() 814 rdev->lldi.vr->qp.start, c4iw_rdev_open() 815 rdev->lldi.vr->qp.size, c4iw_rdev_open() 837 rdev->stats.qid.total = rdev->lldi.vr->qp.size; c4iw_rdev_open() 926 infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && rdma_supported() 974 * For T4 devices with onchip qp mem, we map only that part c4iw_alloc() 1247 struct c4iw_qp *qp = p; disable_qp_db() local 1249 t4_disable_wq_db(&qp->wq); disable_qp_db() 1269 struct c4iw_qp *qp = p; enable_qp_db() local 1271 t4_enable_wq_db(&qp->wq); enable_qp_db() 1275 static void resume_rc_qp(struct c4iw_qp *qp) resume_rc_qp() argument 1277 spin_lock(&qp->lock); resume_rc_qp() 1278 t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, resume_rc_qp() 1279 is_t5(qp->rhp->rdev.lldi.adapter_type), NULL); resume_rc_qp() 1280 qp->wq.sq.wq_pidx_inc = 0; resume_rc_qp() 1281 t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, resume_rc_qp() 1282 is_t5(qp->rhp->rdev.lldi.adapter_type), NULL); resume_rc_qp() 1283 qp->wq.rq.wq_pidx_inc = 0; resume_rc_qp() 1284 spin_unlock(&qp->lock); resume_rc_qp() 1290 struct c4iw_qp *qp; resume_a_chunk() local 1293 qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp, resume_a_chunk() 1295 list_del_init(&qp->db_fc_entry); resume_a_chunk() 1296 resume_rc_qp(qp); resume_a_chunk() 1352 struct c4iw_qp *qp = p; add_and_ref_qp() local 1354 c4iw_qp_add_ref(&qp->ibqp); add_and_ref_qp() 1355 qp_listp->qps[qp_listp->idx++] = qp; add_and_ref_qp() 1380 struct c4iw_qp *qp = qp_list->qps[idx]; recover_lost_dbs() local 1382 spin_lock_irq(&qp->rhp->lock); recover_lost_dbs() 1383 spin_lock(&qp->lock); recover_lost_dbs() 1384 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], recover_lost_dbs() 1385 qp->wq.sq.qid, recover_lost_dbs() 1386 t4_sq_host_wq_pidx(&qp->wq), recover_lost_dbs() 1387 t4_sq_wq_size(&qp->wq)); recover_lost_dbs() 1392 pci_name(ctx->lldi.pdev), qp->wq.sq.qid); recover_lost_dbs() 1393 spin_unlock(&qp->lock); recover_lost_dbs() 1394 spin_unlock_irq(&qp->rhp->lock); recover_lost_dbs() 1397 qp->wq.sq.wq_pidx_inc = 0; recover_lost_dbs() 1399 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], recover_lost_dbs() 1400 qp->wq.rq.qid, recover_lost_dbs() 1401 t4_rq_host_wq_pidx(&qp->wq), recover_lost_dbs() 1402 t4_rq_wq_size(&qp->wq)); recover_lost_dbs() 1408 pci_name(ctx->lldi.pdev), qp->wq.rq.qid); recover_lost_dbs() 1409 spin_unlock(&qp->lock); recover_lost_dbs() 1410 spin_unlock_irq(&qp->rhp->lock); recover_lost_dbs() 1413 qp->wq.rq.wq_pidx_inc = 0; recover_lost_dbs() 1414 spin_unlock(&qp->lock); recover_lost_dbs() 1415 spin_unlock_irq(&qp->rhp->lock); recover_lost_dbs() 1418 while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) { recover_lost_dbs() 1458 /* add and ref each qp so it doesn't get freed */ recover_queues()
|
H A D | resource.c | 43 rdev->lldi.vr->qp.start, c4iw_init_qid_table() 44 rdev->lldi.vr->qp.size, c4iw_init_qid_table() 45 rdev->lldi.vr->qp.size, 0)) c4iw_init_qid_table() 48 for (i = rdev->lldi.vr->qp.start; c4iw_init_qid_table() 49 i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++) c4iw_init_qid_table() 126 * now put the same ids on the qp list since they all c4iw_get_cqid()
|
H A D | cm.c | 153 c4iw_qp_rem_ref(&ep->com.qp->ibqp); deref_qp() 160 c4iw_qp_add_ref(&ep->com.qp->ibqp); ref_qp() 1472 err = c4iw_modify_qp(ep->com.qp->rhp, process_mpa_reply() 1473 ep->com.qp, mask, &attrs, 1); process_mpa_reply() 1487 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, process_mpa_reply() 1507 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, process_mpa_reply() 1704 BUG_ON(!ep->com.qp); rx_data() 1708 __func__, ep->com.qp->wq.sq.qid, ep, rx_data() 1711 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, rx_data() 1967 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); c4iw_reconnect() 2518 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close() 2534 if (ep->com.cm_id && ep->com.qp) { peer_close() 2536 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close() 2627 if (ep->com.cm_id && ep->com.qp) { peer_abort() 2629 ret = c4iw_modify_qp(ep->com.qp->rhp, peer_abort() 2630 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, peer_abort() 2634 "%s - qp <- error failed!\n", peer_abort() 2707 if ((ep->com.cm_id) && (ep->com.qp)) { close_con_rpl() 2709 c4iw_modify_qp(ep->com.qp->rhp, close_con_rpl() 2710 ep->com.qp, close_con_rpl() 2742 if (ep && ep->com.qp) { terminate() 2744 ep->com.qp->wq.sq.qid); terminate() 2746 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, terminate() 2749 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); terminate() 2822 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); c4iw_accept_cr() local 2833 BUG_ON(!qp); c4iw_accept_cr() 2885 ep->com.qp = qp; c4iw_accept_cr() 2902 err = c4iw_modify_qp(ep->com.qp->rhp, c4iw_accept_cr() 2903 ep->com.qp, mask, &attrs, 1); c4iw_accept_cr() 3027 ep->com.qp = get_qhp(dev, conn_param->qpn); c4iw_connect() 3028 if (!ep->com.qp) { c4iw_connect() 3034 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, c4iw_connect() 3035 ep->com.qp, cm_id); c4iw_connect() 3817 if (ep->com.cm_id && ep->com.qp) { process_timeout() 3819 c4iw_modify_qp(ep->com.qp->rhp, process_timeout() 3820 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, process_timeout()
|
H A D | qp.c | 669 void c4iw_qp_add_ref(struct ib_qp *qp) c4iw_qp_add_ref() argument 671 PDBG("%s ib_qp %p\n", __func__, qp); c4iw_qp_add_ref() 672 atomic_inc(&(to_c4iw_qp(qp)->refcnt)); c4iw_qp_add_ref() 675 void c4iw_qp_rem_ref(struct ib_qp *qp) c4iw_qp_rem_ref() argument 677 PDBG("%s ib_qp %p\n", __func__, qp); c4iw_qp_rem_ref() 678 if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt))) c4iw_qp_rem_ref() 679 wake_up(&(to_c4iw_qp(qp)->wait)); c4iw_qp_rem_ref() 927 int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind) c4iw_bind_mw() argument 1113 /* locking hierarchy: cq lock first, then qp lock. */ __flush_qp() 1130 /* locking hierarchy: cq lock first, then qp lock. */ __flush_qp()
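c4iw_qp_add_ref()/c4iw_qp_rem_ref() above are the classic teardown handshake: holders take atomic references, and the last put wakes whoever is blocked waiting for the count to drain before freeing the QP. A user-space sketch of the same shape, with pthreads standing in for the kernel's wait queue:

    #include <pthread.h>
    #include <stdatomic.h>

    struct obj {
        atomic_int refcnt;
        pthread_mutex_t lock;
        pthread_cond_t drained;
    };

    static void obj_get(struct obj *o) { atomic_fetch_add(&o->refcnt, 1); }

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcnt, 1) == 1) { /* dropped last ref */
            pthread_mutex_lock(&o->lock);
            pthread_cond_broadcast(&o->drained);    /* the wake_up() */
            pthread_mutex_unlock(&o->lock);
        }
    }

    static void obj_wait_drained(struct obj *o)     /* destroy side */
    {
        pthread_mutex_lock(&o->lock);
        while (atomic_load(&o->refcnt) != 0)        /* the wait_event() */
            pthread_cond_wait(&o->drained, &o->lock);
        pthread_mutex_unlock(&o->lock);
    }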
|
H A D | ev.c | 109 event.element.qp = &qhp->ibqp; post_qp_event()
|
H A D | iw_cxgb4.h | 798 struct c4iw_qp *qp; member in struct:c4iw_ep_common 962 int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, 969 void c4iw_qp_add_ref(struct ib_qp *qp); 970 void c4iw_qp_rem_ref(struct ib_qp *qp);
|
H A D | provider.c | 321 props->max_qp = dev->rdev.lldi.vr->qp.size / 2; c4iw_query_device() 329 props->max_cq = dev->rdev.lldi.vr->qp.size; c4iw_query_device()
|
/linux-4.1.27/net/ipv4/ |
H A D | ip_fragment.c | 95 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, 121 const struct ipq *qp; ip4_frag_match() local 124 qp = container_of(q, struct ipq, q); ip4_frag_match() 125 return qp->id == arg->iph->id && ip4_frag_match() 126 qp->saddr == arg->iph->saddr && ip4_frag_match() 127 qp->daddr == arg->iph->daddr && ip4_frag_match() 128 qp->protocol == arg->iph->protocol && ip4_frag_match() 129 qp->user == arg->user; ip4_frag_match() 134 struct ipq *qp = container_of(q, struct ipq, q); ip4_frag_init() local 141 qp->protocol = arg->iph->protocol; ip4_frag_init() 142 qp->id = arg->iph->id; ip4_frag_init() 143 qp->ecn = ip4_frag_ecn(arg->iph->tos); ip4_frag_init() 144 qp->saddr = arg->iph->saddr; ip4_frag_init() 145 qp->daddr = arg->iph->daddr; ip4_frag_init() 146 qp->user = arg->user; ip4_frag_init() 147 qp->peer = sysctl_ipfrag_max_dist ? ip4_frag_init() 153 struct ipq *qp; ip4_frag_free() local 155 qp = container_of(q, struct ipq, q); ip4_frag_free() 156 if (qp->peer) ip4_frag_free() 157 inet_putpeer(qp->peer); ip4_frag_free() 181 struct ipq *qp; ip_expire() local 184 qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); ip_expire() 185 net = container_of(qp->q.net, struct net, ipv4.frags); ip_expire() 187 spin_lock(&qp->q.lock); ip_expire() 189 if (qp->q.flags & INET_FRAG_COMPLETE) ip_expire() 192 ipq_kill(qp); ip_expire() 195 if (!(qp->q.flags & INET_FRAG_EVICTED)) { ip_expire() 196 struct sk_buff *head = qp->q.fragments; ip_expire() 202 if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) ip_expire() 206 head->dev = dev_get_by_index_rcu(net, qp->iif); ip_expire() 220 if (qp->user == IP_DEFRAG_AF_PACKET || ip_expire() 221 ((qp->user >= IP_DEFRAG_CONNTRACK_IN) && ip_expire() 222 (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) && ip_expire() 232 spin_unlock(&qp->q.lock); ip_expire() 233 ipq_put(qp); ip_expire() 259 static int ip_frag_too_far(struct ipq *qp) ip_frag_too_far() argument 261 struct inet_peer *peer = qp->peer; ip_frag_too_far() 270 start = qp->rid; ip_frag_too_far() 272 qp->rid = end; ip_frag_too_far() 274 rc = qp->q.fragments && (end - start) > max; ip_frag_too_far() 279 net = container_of(qp->q.net, struct net, ipv4.frags); ip_frag_too_far() 286 static int ip_frag_reinit(struct ipq *qp) ip_frag_reinit() argument 291 if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) { ip_frag_reinit() 292 atomic_inc(&qp->q.refcnt); ip_frag_reinit() 296 fp = qp->q.fragments; ip_frag_reinit() 304 sub_frag_mem_limit(&qp->q, sum_truesize); ip_frag_reinit() 306 qp->q.flags = 0; ip_frag_reinit() 307 qp->q.len = 0; ip_frag_reinit() 308 qp->q.meat = 0; ip_frag_reinit() 309 qp->q.fragments = NULL; ip_frag_reinit() 310 qp->q.fragments_tail = NULL; ip_frag_reinit() 311 qp->iif = 0; ip_frag_reinit() 312 qp->ecn = 0; ip_frag_reinit() 318 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) ip_frag_queue() argument 327 if (qp->q.flags & INET_FRAG_COMPLETE) ip_frag_queue() 331 unlikely(ip_frag_too_far(qp)) && ip_frag_queue() 332 unlikely(err = ip_frag_reinit(qp))) { ip_frag_queue() 333 ipq_kill(qp); ip_frag_queue() 353 if (end < qp->q.len || ip_frag_queue() 354 ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len)) ip_frag_queue() 356 qp->q.flags |= INET_FRAG_LAST_IN; ip_frag_queue() 357 qp->q.len = end; ip_frag_queue() 364 if (end > qp->q.len) { ip_frag_queue() 366 if (qp->q.flags & INET_FRAG_LAST_IN) ip_frag_queue() 368 qp->q.len = end; ip_frag_queue() 386 prev = qp->q.fragments_tail; ip_frag_queue() 392 for (next = qp->q.fragments; next != NULL; next = next->next) { ip_frag_queue() 431 qp->q.meat -= i; ip_frag_queue() 446 qp->q.fragments = next; ip_frag_queue() 448 qp->q.meat -= free_it->len; ip_frag_queue() 449 sub_frag_mem_limit(&qp->q, free_it->truesize); ip_frag_queue() 459 qp->q.fragments_tail = skb; ip_frag_queue() 463 qp->q.fragments = skb; ip_frag_queue() 467 qp->iif = dev->ifindex; ip_frag_queue() 470 qp->q.stamp = skb->tstamp; ip_frag_queue() 471 qp->q.meat += skb->len; ip_frag_queue() 472 qp->ecn |= ecn; ip_frag_queue() 473 add_frag_mem_limit(&qp->q, skb->truesize); ip_frag_queue() 475 qp->q.flags |= INET_FRAG_FIRST_IN; ip_frag_queue() 478 skb->len + ihl > qp->q.max_size) ip_frag_queue() 479 qp->q.max_size = skb->len + ihl; ip_frag_queue() 481 if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && ip_frag_queue() 482 qp->q.meat == qp->q.len) { ip_frag_queue() 486 err = ip_frag_reasm(qp, prev, dev); ip_frag_queue() 502 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, ip_frag_reasm() argument 505 struct net *net = container_of(qp->q.net, struct net, ipv4.frags); ip_frag_reasm() 507 struct sk_buff *fp, *head = qp->q.fragments; ip_frag_reasm() 514 ipq_kill(qp); ip_frag_reasm() 516 ecn = ip_frag_ecn_table[qp->ecn]; ip_frag_reasm() 530 qp->q.fragments_tail = fp; ip_frag_reasm() 533 skb_morph(head, qp->q.fragments); ip_frag_reasm() 534 head->next = qp->q.fragments->next; ip_frag_reasm() 536 consume_skb(qp->q.fragments); ip_frag_reasm() 537 qp->q.fragments = head; ip_frag_reasm() 545 len = ihlen + qp->q.len; ip_frag_reasm() 576 add_frag_mem_limit(&qp->q, clone->truesize); ip_frag_reasm() 604 sub_frag_mem_limit(&qp->q, sum_truesize); ip_frag_reasm() 608 head->tstamp = qp->q.stamp; ip_frag_reasm() 609 IPCB(head)->frag_max_size = qp->q.max_size; ip_frag_reasm() 613 iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0; ip_frag_reasm() 620 qp->q.fragments = NULL; ip_frag_reasm() 621 qp->q.fragments_tail = NULL; ip_frag_reasm() 625 net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp); ip_frag_reasm() 629 net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr); ip_frag_reasm() 638 struct ipq *qp; ip_defrag() local 645 qp = ip_find(net, ip_hdr(skb), user); ip_defrag() 646 if (qp) { ip_defrag() 649 spin_lock(&qp->q.lock); ip_defrag() 651 ret = ip_frag_queue(qp, skb); ip_defrag() 653 spin_unlock(&qp->q.lock); ip_defrag() 654 ipq_put(qp); ip_defrag()
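Two invariants carry this file: a fragment queue is keyed by the (id, saddr, daddr, protocol, user) tuple that ip4_frag_match() compares, and ip_frag_queue() only reassembles once both end fragments have arrived and the accumulated bytes (q.meat) cover the whole datagram (q.len). A sketch of that completion test, ignoring the overlap trimming and eviction handling the real code also does:

    #include <stdbool.h>

    #define FRAG_FIRST_IN 0x1   /* offset-0 fragment seen          */
    #define FRAG_LAST_IN  0x2   /* fragment without MF flag seen   */

    struct frag_queue {
        unsigned flags;
        unsigned len;    /* total datagram length, set by last frag */
        unsigned meat;   /* payload bytes accumulated so far        */
    };

    static void frag_add(struct frag_queue *q, unsigned off,
                         unsigned bytes, bool more_fragments)
    {
        if (off == 0)
            q->flags |= FRAG_FIRST_IN;
        if (!more_fragments) {
            q->flags |= FRAG_LAST_IN;
            q->len = off + bytes;
        }
        q->meat += bytes;
    }

    static bool frag_complete(const struct frag_queue *q)
    {
        return q->flags == (FRAG_FIRST_IN | FRAG_LAST_IN) &&
               q->meat == q->len;
    }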
|
H A D | inet_fragment.c | 347 struct inet_frag_queue *qp; inet_frag_intern() local 354 hlist_for_each_entry(qp, &hb->chain, list) { inet_frag_intern() 355 if (qp->net == nf && f->match(qp, arg)) { inet_frag_intern() 356 atomic_inc(&qp->refcnt); inet_frag_intern() 360 return qp; inet_frag_intern() 364 qp = qp_in; inet_frag_intern() 365 if (!mod_timer(&qp->timer, jiffies + nf->timeout)) inet_frag_intern() 366 atomic_inc(&qp->refcnt); inet_frag_intern() 368 atomic_inc(&qp->refcnt); inet_frag_intern() 369 hlist_add_head(&qp->list, &hb->chain); inet_frag_intern() 373 return qp; inet_frag_intern()
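inet_frag_intern() is lookup-or-publish under the bucket lock: if an equal queue raced in first it gains a reference and wins, and the caller's candidate is discarded; otherwise the candidate's timer is armed and it is linked with one reference for the hash chain and one for the caller. A single-bucket sketch (the match test and refcounting are simplified stand-ins):

    #include <pthread.h>

    struct fq { struct fq *next; int refcnt; int key; };

    static struct fq *bucket;
    static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct fq *intern(struct fq *cand)
    {
        struct fq *q;

        pthread_mutex_lock(&bucket_lock);
        for (q = bucket; q; q = q->next)
            if (q->key == cand->key) {      /* an equal queue won the race */
                q->refcnt++;
                pthread_mutex_unlock(&bucket_lock);
                return q;                   /* caller frees its candidate  */
            }
        cand->refcnt += 2;                  /* hash chain + returned ref   */
        cand->next = bucket;
        bucket = cand;
        pthread_mutex_unlock(&bucket_lock);
        return cand;
    }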
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/ |
H A D | Makefile | 4 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
|
H A D | qp.c | 37 #include <linux/mlx5/qp.h> 73 struct mlx5_core_qp *qp; mlx5_rsc_event() local 80 qp = (struct mlx5_core_qp *)common; mlx5_rsc_event() 81 qp->event(qp, event_type); mlx5_rsc_event() 97 struct mlx5_core_qp *qp = mlx5_eq_pagefault() local 101 if (!qp) { mlx5_eq_pagefault() 165 if (qp->pfault_handler) { mlx5_eq_pagefault() 166 qp->pfault_handler(qp, &pfault); mlx5_eq_pagefault() 181 struct mlx5_core_qp *qp, mlx5_core_create_qp() 206 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; mlx5_core_create_qp() 207 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); mlx5_core_create_qp() 209 qp->common.res = MLX5_RES_QP; mlx5_core_create_qp() 211 err = radix_tree_insert(&table->tree, qp->qpn, qp); mlx5_core_create_qp() 218 err = mlx5_debug_qp_add(dev, qp); mlx5_core_create_qp() 221 qp->qpn); mlx5_core_create_qp() 223 qp->pid = current->pid; mlx5_core_create_qp() 224 atomic_set(&qp->common.refcount, 1); mlx5_core_create_qp() 226 init_completion(&qp->common.free); mlx5_core_create_qp() 234 din.qpn = cpu_to_be32(qp->qpn); mlx5_core_create_qp() 242 struct mlx5_core_qp *qp) mlx5_core_destroy_qp() 250 mlx5_debug_qp_remove(dev, qp); mlx5_core_destroy_qp() 253 radix_tree_delete(&table->tree, qp->qpn); mlx5_core_destroy_qp() 256 mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp); mlx5_core_destroy_qp() 257 wait_for_completion(&qp->common.free); mlx5_core_destroy_qp() 262 in.qpn = cpu_to_be32(qp->qpn); mlx5_core_destroy_qp() 278 struct mlx5_core_qp *qp) mlx5_core_qp_modify() 328 in->qpn = cpu_to_be32(qp->qpn); mlx5_core_qp_modify() 351 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, mlx5_core_qp_query() argument 360 in.qpn = cpu_to_be32(qp->qpn); mlx5_core_qp_query() 180 mlx5_core_create_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, struct mlx5_create_qp_mbox_in *in, int inlen) mlx5_core_create_qp() argument 241 mlx5_core_destroy_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) mlx5_core_destroy_qp() argument 275 mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, enum mlx5_qp_state new_state, struct mlx5_modify_qp_mbox_in *in, int sqd_event, struct mlx5_core_qp *qp) mlx5_core_qp_modify() argument
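mlx5 keeps one lookup structure per device: a radix tree mapping QPN to QP under the table lock. Create inserts only after the firmware has returned the number; destroy deletes from the tree first, so no new lookups can succeed, then drops the reference and waits on the completion for outstanding users. A sketch of the keyed table, with an open hash standing in for the radix tree (QPN_BUCKETS and struct core_qp are illustrative):

    #include <stddef.h>
    #include <pthread.h>

    #define QPN_BUCKETS 256

    struct core_qp { unsigned qpn; struct core_qp *next; };

    static struct core_qp *qpn_tbl[QPN_BUCKETS];
    static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

    static void qpn_insert(struct core_qp *qp)
    {
        unsigned b = qp->qpn % QPN_BUCKETS;

        pthread_mutex_lock(&tbl_lock);
        qp->next = qpn_tbl[b];
        qpn_tbl[b] = qp;
        pthread_mutex_unlock(&tbl_lock);
    }

    static struct core_qp *qpn_lookup(unsigned qpn)
    {
        struct core_qp *qp;

        pthread_mutex_lock(&tbl_lock);
        for (qp = qpn_tbl[qpn % QPN_BUCKETS]; qp; qp = qp->next)
            if (qp->qpn == qpn)
                break;
        pthread_mutex_unlock(&tbl_lock);
        return qp;
    }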
|
H A D | debugfs.c | 35 #include <linux/mlx5/qp.h> 277 static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, qp_read_field() argument 290 err = mlx5_core_qp_query(dev, qp, out, sizeof(*out)); qp_read_field() 292 mlx5_core_warn(dev, "failed to query qp\n"); qp_read_field() 300 param = qp->pid; qp_read_field() 536 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) mlx5_debug_qp_add() argument 544 &qp->dbg, qp->qpn, qp_fields, mlx5_debug_qp_add() 545 ARRAY_SIZE(qp_fields), qp); mlx5_debug_qp_add() 547 qp->dbg = NULL; mlx5_debug_qp_add() 552 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp) mlx5_debug_qp_remove() argument 557 if (qp->dbg) mlx5_debug_qp_remove() 558 rem_res_tree(qp->dbg); mlx5_debug_qp_remove()
|
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
H A D | ehca_uverbs.c | 198 static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, ehca_mmap_qp() argument 205 ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num); ehca_mmap_qp() 206 ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa); ehca_mmap_qp() 208 ehca_err(qp->ib_qp.device, ehca_mmap_qp() 210 ret, qp->ib_qp.qp_num); ehca_mmap_qp() 215 case 1: /* qp rqueue_addr */ ehca_mmap_qp() 216 ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num); ehca_mmap_qp() 217 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, ehca_mmap_qp() 218 &qp->mm_count_rqueue); ehca_mmap_qp() 220 ehca_err(qp->ib_qp.device, ehca_mmap_qp() 222 ret, qp->ib_qp.qp_num); ehca_mmap_qp() 227 case 2: /* qp squeue_addr */ ehca_mmap_qp() 228 ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num); ehca_mmap_qp() 229 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, ehca_mmap_qp() 230 &qp->mm_count_squeue); ehca_mmap_qp() 232 ehca_err(qp->ib_qp.device, ehca_mmap_qp() 234 ret, qp->ib_qp.qp_num); ehca_mmap_qp() 240 ehca_err(qp->ib_qp.device, "bad resource type=%x qp=num=%x", ehca_mmap_qp() 241 rsrc_type, qp->ib_qp.qp_num); ehca_mmap_qp() 256 struct ehca_qp *qp; ehca_mmap() local 283 qp = idr_find(&ehca_qp_idr, idr_handle); ehca_mmap() 287 if (!qp) ehca_mmap() 290 uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject; ehca_mmap() 294 ret = ehca_mmap_qp(vma, qp, rsrc_type); ehca_mmap() 296 ehca_err(qp->ib_qp.device, ehca_mmap() 298 ret, qp->ib_qp.qp_num); ehca_mmap()
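ehca_mmap() demultiplexes a single mmap() offset into an object handle (looked up in ehca_qp_idr) plus a resource selector: the firmware galpas, the receive queue (case 1), or the send queue (case 2). A sketch of that kind of offset packing; the 4-bit field width below is made up for illustration, not ehca's actual layout:

    #define RSRC_BITS 4
    #define RSRC_MASK ((1UL << RSRC_BITS) - 1)

    /* Pack (handle, resource type) into one mmap page offset ... */
    static unsigned long encode_pgoff(unsigned long handle, unsigned rsrc)
    {
        return (handle << RSRC_BITS) | (rsrc & RSRC_MASK);
    }

    /* ... and split it back apart in the mmap handler. */
    static void decode_pgoff(unsigned long pgoff,
                             unsigned long *handle, unsigned *rsrc)
    {
        *rsrc = pgoff & RSRC_MASK;
        *handle = pgoff >> RSRC_BITS;
    }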
|
H A D | ehca_reqs.c | 154 static inline int ehca_write_swqe(struct ehca_qp *qp, ehca_write_swqe() argument 164 struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx]; ehca_write_swqe() 167 (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) { ehca_write_swqe() 170 send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg); ehca_write_swqe() 205 qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR) ehca_write_swqe() 220 switch (qp->qp_type) { ehca_write_swqe() 227 remote_qkey = qp->qkey; ehca_write_swqe() 232 ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp); ehca_write_swqe() 236 ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num); ehca_write_swqe() 254 if (qp->qp_type == IB_QPT_SMI || ehca_write_swqe() 255 qp->qp_type == IB_QPT_GSI) ehca_write_swqe() 257 if (qp->qp_type == IB_QPT_GSI) { ehca_write_swqe() 294 qp->message_count = qp->packet_count = 0; ehca_write_swqe() 295 qp->unsol_ack_circ = 1; ehca_write_swqe() 298 qp->packet_count += (dma_length >> qp->mtu_shift) + 1; ehca_write_swqe() 303 ehca_gen_err("Invalid qptype=%x", qp->qp_type); ehca_write_swqe() 308 ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp); ehca_write_swqe() 440 int ehca_post_send(struct ib_qp *qp, ehca_post_send() argument 444 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); ehca_post_send() 451 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x", ehca_post_send() 452 my_qp->state, qp->qp_num); ehca_post_send() 475 ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num); ehca_post_send() 493 ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i", ehca_post_send() 494 my_qp, qp->qp_num, wqe_cnt, ret); ehca_post_send() 583 int ehca_post_recv(struct ib_qp *qp, ehca_post_recv() argument 587 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); ehca_post_recv() 591 ehca_err(qp->device, "Invalid QP state qp_state=%d qpn=%x", ehca_post_recv() 592 my_qp->state, qp->qp_num); ehca_post_recv() 597 return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr); ehca_post_recv() 651 struct ehca_qp *qp; ehca_poll_cq_one() local 655 qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number); ehca_poll_cq_one() 656 if (!qp) { ehca_poll_cq_one() 658 "could not find qp -> ignore cqe", ehca_poll_cq_one() 665 spin_lock_irqsave(&qp->spinlock_s, flags); ehca_poll_cq_one() 666 purgeflag = qp->sqerr_purgeflag; ehca_poll_cq_one() 667 spin_unlock_irqrestore(&qp->spinlock_s, flags); ehca_poll_cq_one() 681 qp->sqerr_purgeflag = 0; ehca_poll_cq_one() 705 wc->qp = &my_qp->ib_qp; ehca_poll_cq_one() 857 wc->qp = &my_qp->ib_qp; generate_flush_cqes()
|
H A D | ehca_irq.c | 98 struct ehca_qp *qp = (struct ehca_qp *)data; print_error_data() local 106 qp->ib_qp.qp_num, resource); print_error_data() 179 static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp, dispatch_qp_event() argument 185 if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed) dispatch_qp_event() 191 if (qp->ext_type == EQPT_SRQ) { dispatch_qp_event() 192 if (!qp->ib_srq.event_handler) dispatch_qp_event() 195 event.element.srq = &qp->ib_srq; dispatch_qp_event() 196 qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context); dispatch_qp_event() 198 if (!qp->ib_qp.event_handler) dispatch_qp_event() 201 event.element.qp = &qp->ib_qp; dispatch_qp_event() 202 qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context); dispatch_qp_event() 209 struct ehca_qp *qp; qp_event_callback() local 213 qp = idr_find(&ehca_qp_idr, token); qp_event_callback() 214 if (qp) qp_event_callback() 215 atomic_inc(&qp->nr_events); qp_event_callback() 218 if (!qp) qp_event_callback() 222 ehca_error_data(shca, qp, qp->ipz_qp_handle.handle); qp_event_callback() 224 dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ? qp_event_callback() 232 if (fatal && qp->ext_type == EQPT_SRQBASE) qp_event_callback() 233 dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED); qp_event_callback() 235 if (atomic_dec_and_test(&qp->nr_events)) qp_event_callback() 236 wake_up(&qp->wait_completion); qp_event_callback()
|
H A D | ehca_cq.c | 55 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp) ehca_cq_assign_qp() argument 57 unsigned int qp_num = qp->real_qp_num; ehca_cq_assign_qp() 62 hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]); ehca_cq_assign_qp() 76 struct ehca_qp *qp; ehca_cq_unassign_qp() local 81 qp = hlist_entry(iter, struct ehca_qp, list_entries); ehca_cq_unassign_qp() 82 if (qp->real_qp_num == real_qp_num) { ehca_cq_unassign_qp() 85 "removed qp from cq .cq_num=%x real_qp_num=%x", ehca_cq_unassign_qp() 94 "qp not found cq_num=%x real_qp_num=%x", ehca_cq_unassign_qp() 105 struct ehca_qp *qp; ehca_cq_get_qp() local 107 qp = hlist_entry(iter, struct ehca_qp, list_entries); ehca_cq_get_qp() 108 if (qp->real_qp_num == real_qp_num) { ehca_cq_get_qp() 109 ret = qp; ehca_cq_get_qp()
|
H A D | hipz_fns_core.h | 61 static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes) hipz_update_sqa() argument 64 hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa, hipz_update_sqa() 68 static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes) hipz_update_rqa() argument 71 hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa, hipz_update_rqa()
|
H A D | ehca_iverbs.h | 100 int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw, 147 int ehca_destroy_qp(struct ib_qp *qp); 152 int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 155 int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr, 158 int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr, 179 int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 181 int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 200 void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
|
H A D | ehca_qp.c | 58 * attributes not supported by query qp 64 * ehca (internal) qp state values 77 * qp state transitions as defined by IB Arch Rel 1.1 page 431 96 * returns ehca qp state corresponding to given ib qp state 123 * returns ib qp state corresponding to given ehca qp state 163 * returns ehca qp type corresponding to ib qp type 238 * ib qp type used by create_qp() 400 void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq) ehca_add_to_err_list() argument 405 if (qp->ext_type == EQPT_LLQP) ehca_add_to_err_list() 409 list = &qp->send_cq->sqp_err_list; ehca_add_to_err_list() 410 node = &qp->sq_err_node; ehca_add_to_err_list() 412 list = &qp->recv_cq->rqp_err_list; ehca_add_to_err_list() 413 node = &qp->rq_err_node; ehca_add_to_err_list() 614 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); internal_create_qp() 651 ehca_err(pd->device, "Invalid number of qp"); internal_create_qp() 849 /* alloc array to cache subsequent modify qp parms internal_create_qp() 882 "Couldn't assign qp to send_cq ret=%i", ret); internal_create_qp() 1279 if (qp_cur_state == -EINVAL) { /* invalid qp state */ internal_modify_qp() 1335 "Invalid qp transition new_state=%x cur_state=%x " internal_modify_qp() 1346 ehca_err(ibqp->device, "Invalid new qp state=%x " internal_modify_qp() 1474 ehca_warn(ibqp->device, "Couldn't modify qp port=%x: " internal_modify_qp() 1887 int ehca_query_qp(struct ib_qp *qp, ehca_query_qp() argument 1891 struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); ehca_query_qp() 1892 struct ehca_shca *shca = container_of(qp->device, struct ehca_shca, ehca_query_qp() 1900 ehca_err(qp->device, "Invalid attribute mask " ehca_query_qp() 1902 my_qp, qp->qp_num, qp_attr_mask); ehca_query_qp() 1908 ehca_err(qp->device, "Out of memory for qpcb " ehca_query_qp() 1909 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); ehca_query_qp() 1920 ehca_err(qp->device, "hipz_h_query_qp() failed " ehca_query_qp() 1922 my_qp, qp->qp_num, h_ret); ehca_query_qp() 1931 ehca_err(qp->device, "Got invalid ehca_qp_state=%x " ehca_query_qp() 1933 qpcb->qp_state, my_qp, qp->qp_num); ehca_query_qp() 2024 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); ehca_query_qp() 2159 ehca_err(dev, "Couldn't unassign qp from " internal_destroy_qp() 2228 int ehca_destroy_qp(struct ib_qp *qp) ehca_destroy_qp() argument 2230 return internal_destroy_qp(qp->device, ehca_destroy_qp() 2231 container_of(qp, struct ehca_qp, ib_qp), ehca_destroy_qp() 2232 qp->uobject); ehca_destroy_qp()
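ehca_qp.c validates every modify-QP request against the state machine it cites ("qp state transitions as defined by IB Arch Rel 1.1 page 431"). The usual shape is a two-dimensional legality table indexed by current and requested state; a sketch with only the main RESET -> INIT -> RTR -> RTS ladder and the to-ERROR edges filled in:

    #include <stdbool.h>

    enum qps { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_ERR, QPS_MAX };

    static const bool legal[QPS_MAX][QPS_MAX] = {
        [QPS_RESET] = { [QPS_INIT] = true, [QPS_ERR] = true },
        [QPS_INIT]  = { [QPS_INIT] = true, [QPS_RTR] = true,
                        [QPS_ERR]  = true },
        [QPS_RTR]   = { [QPS_RTS]  = true, [QPS_ERR] = true },
        [QPS_RTS]   = { [QPS_ERR]  = true },
    };

    static bool transition_ok(enum qps cur, enum qps next)
    {
        return legal[cur][next];   /* reject illegal modify requests */
    }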
|
H A D | ehca_classes.h | 153 /* struct to cache modify_qp()'s parms for GSI/SMI qp */ 211 /* array to cache modify_qp()'s parms for GSI/SMI qp */ 230 #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ) 231 #define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ) 232 #define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE) 478 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
|
H A D | hcp_if.c | 583 struct ehca_qp *qp) hipz_h_destroy_qp() 588 ret = hcp_galpas_dtor(&qp->galpas); hipz_h_destroy_qp() 590 ehca_gen_err("Could not destruct qp->galpas"); hipz_h_destroy_qp() 597 qp->ipz_qp_handle.handle, /* r6 */ hipz_h_destroy_qp() 604 qp->ipz_qp_handle.handle, /* r5 */ hipz_h_destroy_qp() 582 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle, struct ehca_qp *qp) hipz_h_destroy_qp() argument
|
H A D | hcp_if.h | 161 struct ehca_qp *qp);
|
/linux-4.1.27/drivers/infiniband/hw/amso1100/ |
H A D | c2_qp.c | 120 void c2_set_qp_state(struct c2_qp *qp, int c2_state) c2_set_qp_state() argument 124 pr_debug("%s: qp[%p] state modify %s --> %s\n", c2_set_qp_state() 126 qp, c2_set_qp_state() 127 to_ib_state_str(qp->state), c2_set_qp_state() 129 qp->state = new_state; c2_set_qp_state() 134 int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, c2_qp_modify() argument 144 pr_debug("%s:%d qp=%p, %s --> %s\n", c2_qp_modify() 146 qp, c2_qp_modify() 147 to_ib_state_str(qp->state), c2_qp_modify() 157 wr.qp_handle = qp->adapter_handle; c2_qp_modify() 173 spin_lock_irqsave(&qp->lock, flags); c2_qp_modify() 174 if (qp->cm_id && qp->state == IB_QPS_RTS) { c2_qp_modify() 176 "qp=%p, cm_id=%p\n",qp,qp->cm_id); c2_qp_modify() 178 vq_req->cm_id = qp->cm_id; c2_qp_modify() 181 spin_unlock_irqrestore(&qp->lock, flags); c2_qp_modify() 225 qp->state = next_state; c2_qp_modify() 235 spin_lock_irqsave(&qp->lock, flags); c2_qp_modify() 236 if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) { c2_qp_modify() 237 qp->cm_id->rem_ref(qp->cm_id); c2_qp_modify() 238 qp->cm_id = NULL; c2_qp_modify() 240 spin_unlock_irqrestore(&qp->lock, flags); c2_qp_modify() 246 pr_debug("%s:%d qp=%p, cur_state=%s\n", c2_qp_modify() 248 qp, c2_qp_modify() 249 to_ib_state_str(qp->state)); c2_qp_modify() 253 int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, c2_qp_set_read_limits() argument 268 wr.qp_handle = qp->adapter_handle; c2_qp_set_read_limits() 302 static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp) destroy_qp() argument 324 wr.qp_handle = qp->adapter_handle; destroy_qp() 331 spin_lock_irqsave(&qp->lock, flags); destroy_qp() 332 if (qp->cm_id && qp->state == IB_QPS_RTS) { destroy_qp() 334 "qp=%p, cm_id=%p\n",qp,qp->cm_id); destroy_qp() 336 vq_req->qp = qp; destroy_qp() 337 vq_req->cm_id = qp->cm_id; destroy_qp() 340 spin_unlock_irqrestore(&qp->lock, flags); destroy_qp() 368 spin_lock_irqsave(&qp->lock, flags); destroy_qp() 369 if (qp->cm_id) { destroy_qp() 370 qp->cm_id->rem_ref(qp->cm_id); destroy_qp() 371 qp->cm_id = NULL; destroy_qp() 373 spin_unlock_irqrestore(&qp->lock, flags); destroy_qp() 381 static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp) c2_alloc_qpn() argument 388 ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT); c2_alloc_qpn() 390 qp->qpn = ret; c2_alloc_qpn() 407 struct c2_qp *qp; c2_find_qpn() local 410 qp = idr_find(&c2dev->qp_table.idr, qpn); c2_find_qpn() 412 return qp; c2_find_qpn() 417 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp) c2_alloc_qp() 429 err = c2_alloc_qpn(c2dev, qp); c2_alloc_qp() 432 qp->ibqp.qp_num = qp->qpn; c2_alloc_qp() 433 qp->ibqp.qp_type = IB_QPT_RC; c2_alloc_qp() 436 qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, c2_alloc_qp() 437 &qp->sq_mq.shared_dma, GFP_KERNEL); c2_alloc_qp() 438 if (!qp->sq_mq.shared) { c2_alloc_qp() 443 qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, c2_alloc_qp() 444 &qp->rq_mq.shared_dma, GFP_KERNEL); c2_alloc_qp() 445 if (!qp->rq_mq.shared) { c2_alloc_qp() 472 wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma); c2_alloc_qp() 473 wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma); c2_alloc_qp() 477 wr.user_context = (unsigned long) qp; c2_alloc_qp() 506 atomic_set(&qp->refcount, 1); c2_alloc_qp() 507 qp->adapter_handle = reply->qp_handle; c2_alloc_qp() 508 qp->state = IB_QPS_RESET; c2_alloc_qp() 509 qp->send_sgl_depth = qp_attrs->cap.max_send_sge; c2_alloc_qp() 510 qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge; c2_alloc_qp() 511 qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge; c2_alloc_qp() 512 init_waitqueue_head(&qp->wait); c2_alloc_qp() 525 c2_mq_req_init(&qp->sq_mq, c2_alloc_qp() 544 c2_mq_req_init(&qp->rq_mq, c2_alloc_qp() 558 iounmap(qp->sq_mq.peer); c2_alloc_qp() 560 destroy_qp(c2dev, qp); c2_alloc_qp() 566 c2_free_mqsp(qp->rq_mq.shared); c2_alloc_qp() 568 c2_free_mqsp(qp->sq_mq.shared); c2_alloc_qp() 570 c2_free_qpn(c2dev, qp->qpn); c2_alloc_qp() 600 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) c2_free_qp() argument 605 send_cq = to_c2cq(qp->ibqp.send_cq); c2_free_qp() 606 recv_cq = to_c2cq(qp->ibqp.recv_cq); c2_free_qp() 613 c2_free_qpn(c2dev, qp->qpn); c2_free_qp() 617 * Destroy qp in the rnic... c2_free_qp() 619 destroy_qp(c2dev, qp); c2_free_qp() 624 c2_cq_clean(c2dev, qp, send_cq->cqn); c2_free_qp() 626 c2_cq_clean(c2dev, qp, recv_cq->cqn); c2_free_qp() 631 iounmap(qp->sq_mq.peer); c2_free_qp() 632 iounmap(qp->rq_mq.peer); c2_free_qp() 633 c2_free_mqsp(qp->sq_mq.shared); c2_free_qp() 634 c2_free_mqsp(qp->rq_mq.shared); c2_free_qp() 636 atomic_dec(&qp->refcount); c2_free_qp() 637 wait_event(qp->wait, !atomic_read(&qp->refcount)); c2_free_qp() 754 * qp - ptr to user qp 762 static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size) qp_wr_post() argument 794 struct c2_qp *qp = to_c2qp(ibqp); c2_post_send() local 804 if (qp->state > IB_QPS_RTS) { c2_post_send() 837 if (ib_wr->num_sge > qp->send_sgl_depth) { c2_post_send() 855 if (ib_wr->num_sge > qp->rdma_write_sgl_depth) { c2_post_send() 922 spin_lock_irqsave(&qp->lock, lock_flags); c2_post_send() 923 err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size); c2_post_send() 925 spin_unlock_irqrestore(&qp->lock, lock_flags); c2_post_send() 932 c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count); c2_post_send() 933 spin_unlock_irqrestore(&qp->lock, lock_flags); c2_post_send() 948 struct c2_qp *qp = to_c2qp(ibqp); c2_post_receive() local 953 if (qp->state > IB_QPS_RTS) { c2_post_receive() 965 if (ib_wr->num_sge > qp->recv_sgl_depth) { c2_post_receive() 993 spin_lock_irqsave(&qp->lock, lock_flags); c2_post_receive() 994 err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size); c2_post_receive() 996 spin_unlock_irqrestore(&qp->lock, lock_flags); c2_post_receive() 1003 c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count); c2_post_receive() 1004 spin_unlock_irqrestore(&qp->lock, lock_flags); c2_post_receive() 415 c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd, struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp) c2_alloc_qp() argument
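c2_post_send()/c2_post_receive() above show the verbs posting convention: walk the chained work requests, reject the first one whose num_sge exceeds the queue's SGE depth, and leave *bad_wr pointing at it so the caller knows exactly where posting stopped. A stripped-down sketch of that contract:

    #include <errno.h>

    struct send_wr { int num_sge; struct send_wr *next; };

    static int post_all(struct send_wr *wr, int max_sge,
                        struct send_wr **bad_wr)
    {
        int err = 0;

        for (; wr; wr = wr->next) {
            if (wr->num_sge > max_sge) {
                err = -EINVAL;      /* same errno the driver returns */
                break;
            }
            /* build the WQE and enqueue it here */
        }
        if (err)
            *bad_wr = wr;           /* first request not posted */
        return err;
    }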
|
H A D | c2_ae.c | 186 struct c2_qp *qp = (struct c2_qp *)resource_user_context; c2_ae_event() local 187 struct iw_cm_id *cm_id = qp->cm_id; c2_ae_event() 191 pr_debug("event received, but cm_id is <nul>, qp=%p!\n", c2_ae_event() 192 qp); c2_ae_event() 205 c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state)); c2_ae_event() 220 spin_lock_irqsave(&qp->lock, flags); c2_ae_event() 221 if (qp->cm_id) { c2_ae_event() 222 qp->cm_id->rem_ref(qp->cm_id); c2_ae_event() 223 qp->cm_id = NULL; c2_ae_event() 225 spin_unlock_irqrestore(&qp->lock, flags); c2_ae_event() 235 ib_event.element.qp = &qp->ibqp; c2_ae_event() 238 if (qp->ibqp.event_handler) c2_ae_event() 239 qp->ibqp.event_handler(&ib_event, c2_ae_event() 240 qp->ibqp. c2_ae_event() 249 spin_lock_irqsave(&qp->lock, flags); c2_ae_event() 250 if (qp->cm_id) { c2_ae_event() 251 qp->cm_id->rem_ref(qp->cm_id); c2_ae_event() 252 qp->cm_id = NULL; c2_ae_event() 254 spin_unlock_irqrestore(&qp->lock, flags); c2_ae_event() 265 event_id, qp, cm_id); c2_ae_event()
|
H A D | c2_cm.c | 45 struct c2_qp *qp; c2_llp_connect() local 57 qp = to_c2qp(ibqp); c2_llp_connect() 60 cm_id->provider_data = qp; c2_llp_connect() 62 qp->cm_id = cm_id; c2_llp_connect() 74 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); c2_llp_connect() 96 wr->qp_handle = qp->adapter_handle; c2_llp_connect() 129 qp->cm_id = NULL; c2_llp_connect() 294 struct c2_qp *qp; c2_llp_accept() local 304 qp = to_c2qp(ibqp); c2_llp_accept() 307 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); c2_llp_accept() 317 vq_req->qp = qp; c2_llp_accept() 332 wr->qp_handle = qp->adapter_handle; c2_llp_accept() 335 cm_id->provider_data = qp; c2_llp_accept() 337 qp->cm_id = cm_id; c2_llp_accept() 339 cm_id->provider_data = qp; c2_llp_accept() 380 c2_set_qp_state(qp, C2_QP_STATE_RTS); c2_llp_accept() 391 qp->cm_id = NULL; c2_llp_accept()
|
H A D | c2_provider.c | 204 struct c2_qp *qp; c2_add_ref() local 206 qp = to_c2qp(ibqp); c2_add_ref() 207 atomic_inc(&qp->refcount); c2_add_ref() 212 struct c2_qp *qp; c2_rem_ref() local 214 qp = to_c2qp(ibqp); c2_rem_ref() 215 if (atomic_dec_and_test(&qp->refcount)) c2_rem_ref() 216 wake_up(&qp->wait); c2_rem_ref() 222 struct c2_qp *qp; c2_get_qp() local 224 qp = c2_find_qpn(c2dev, qpn); c2_get_qp() 226 __func__, qp, qpn, device, c2_get_qp() 227 (qp?atomic_read(&qp->refcount):0)); c2_get_qp() 229 return (qp?&qp->ibqp:NULL); c2_get_qp() 236 struct c2_qp *qp; c2_create_qp() local 246 qp = kzalloc(sizeof(*qp), GFP_KERNEL); c2_create_qp() 247 if (!qp) { c2_create_qp() 251 spin_lock_init(&qp->lock); c2_create_qp() 257 to_c2pd(pd), init_attr, qp); c2_create_qp() 271 kfree(qp); c2_create_qp() 275 return &qp->ibqp; c2_create_qp() 280 struct c2_qp *qp = to_c2qp(ib_qp); c2_destroy_qp() local 282 pr_debug("%s:%u qp=%p,qp->state=%d\n", c2_destroy_qp() 283 __func__, __LINE__, ib_qp, qp->state); c2_destroy_qp() 284 c2_free_qp(to_c2dev(ib_qp->device), qp); c2_destroy_qp() 285 kfree(qp); c2_destroy_qp()
|
H A D | c2_cq.c | 82 void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index) c2_cq_clean() argument 100 if (msg->qp_user_context == (u64) (unsigned long) qp) { c2_cq_clean() 135 struct c2_qp *qp; c2_poll_one() local 144 * if the qp returned is null then this qp has already c2_poll_one() 148 while ((qp = c2_poll_one() 158 entry->qp = &qp->ibqp; c2_poll_one() 190 c2_mq_lconsume(&qp->rq_mq, 1); c2_poll_one() 192 c2_mq_lconsume(&qp->sq_mq, c2_poll_one()
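c2_cq_clean() scrubs a completion queue when a QP dies: any message whose qp_user_context matches the dying QP is consumed, so stale completions never reach a consumer that has already freed the QP. A sketch of that scrub over an illustrative CQE array (the real code walks a message queue, not a flat ring):

    struct cqe { const void *qp_ctx; int valid; };

    static void cq_clean(struct cqe *ring, int n, const void *dead_qp)
    {
        int i;

        for (i = 0; i < n; i++)
            if (ring[i].valid && ring[i].qp_ctx == dead_qp)
                ring[i].valid = 0;   /* poll will now skip this slot */
    }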
|
H A D | c2.h | 489 struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp); 490 extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp); 492 extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, 494 extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, 516 extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
|
H A D | c2_vq.h | 47 struct c2_qp *qp; member in struct:c2_vq_req
|
H A D | c2_intr.c | 185 c2_set_qp_state(req->qp, handle_vq()
|
H A D | c2_vq.c | 113 r->qp = NULL; vq_req_alloc()
|
/linux-4.1.27/drivers/infiniband/hw/ocrdma/ |
H A D | ocrdma_verbs.c | 1125 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) ocrdma_add_qpn_map() argument 1129 if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) { ocrdma_add_qpn_map() 1130 dev->qp_tbl[qp->id] = qp; ocrdma_add_qpn_map() 1136 static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) ocrdma_del_qpn_map() argument 1138 dev->qp_tbl[qp->id] = NULL; ocrdma_del_qpn_map() 1148 pr_err("%s(%d) unsupported qp type=0x%x requested\n", ocrdma_check_qp_params() 1214 static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, ocrdma_copy_qp_uresp() argument 1221 struct ocrdma_pd *pd = qp->pd; ocrdma_copy_qp_uresp() 1227 uresp.qp_id = qp->id; ocrdma_copy_qp_uresp() 1228 uresp.sq_dbid = qp->sq.dbid; ocrdma_copy_qp_uresp() 1230 uresp.sq_page_size = PAGE_ALIGN(qp->sq.len); ocrdma_copy_qp_uresp() 1231 uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va); ocrdma_copy_qp_uresp() 1232 uresp.num_wqe_allocated = qp->sq.max_cnt; ocrdma_copy_qp_uresp() 1234 uresp.rq_dbid = qp->rq.dbid; ocrdma_copy_qp_uresp() 1236 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len); ocrdma_copy_qp_uresp() 1237 uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va); ocrdma_copy_qp_uresp() 1238 uresp.num_rqe_allocated = qp->rq.max_cnt; ocrdma_copy_qp_uresp() 1246 if (qp->dpp_enabled) { ocrdma_copy_qp_uresp() 1273 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, ocrdma_set_qp_db() argument 1277 qp->sq_db = dev->nic_info.db + ocrdma_set_qp_db() 1280 qp->rq_db = dev->nic_info.db + ocrdma_set_qp_db() 1284 qp->sq_db = dev->nic_info.db + ocrdma_set_qp_db() 1287 qp->rq_db = dev->nic_info.db + ocrdma_set_qp_db() 1293 static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp) ocrdma_alloc_wr_id_tbl() argument 1295 qp->wqe_wr_id_tbl = ocrdma_alloc_wr_id_tbl() 1296 kzalloc(sizeof(*(qp->wqe_wr_id_tbl)) * qp->sq.max_cnt, ocrdma_alloc_wr_id_tbl() 1298 if (qp->wqe_wr_id_tbl == NULL) ocrdma_alloc_wr_id_tbl() 1300 qp->rqe_wr_id_tbl = ocrdma_alloc_wr_id_tbl() 1301 kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL); ocrdma_alloc_wr_id_tbl() 1302 if (qp->rqe_wr_id_tbl == NULL) ocrdma_alloc_wr_id_tbl() 1308 static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp, ocrdma_set_qp_init_params() argument 1312 qp->pd = pd; ocrdma_set_qp_init_params() 1313 spin_lock_init(&qp->q_lock); ocrdma_set_qp_init_params() 1314 INIT_LIST_HEAD(&qp->sq_entry); ocrdma_set_qp_init_params() 1315 INIT_LIST_HEAD(&qp->rq_entry); ocrdma_set_qp_init_params() 1317 qp->qp_type = attrs->qp_type; ocrdma_set_qp_init_params() 1318 qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR; ocrdma_set_qp_init_params() 1319 qp->max_inline_data = attrs->cap.max_inline_data; ocrdma_set_qp_init_params() 1320 qp->sq.max_sges = attrs->cap.max_send_sge; ocrdma_set_qp_init_params() 1321 qp->rq.max_sges = attrs->cap.max_recv_sge; ocrdma_set_qp_init_params() 1322 qp->state = OCRDMA_QPS_RST; ocrdma_set_qp_init_params() 1323 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; ocrdma_set_qp_init_params() 1342 struct ocrdma_qp *qp; ocrdma_create_qp() local 1356 qp = kzalloc(sizeof(*qp), GFP_KERNEL); ocrdma_create_qp() 1357 if (!qp) { ocrdma_create_qp() 1361 ocrdma_set_qp_init_params(qp, pd, attrs); ocrdma_create_qp() 1363 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | ocrdma_create_qp() 1367 status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq, ocrdma_create_qp() 1375 status = ocrdma_alloc_wr_id_tbl(qp); ocrdma_create_qp() 1380 status = ocrdma_add_qpn_map(dev, qp); ocrdma_create_qp() 1383 ocrdma_set_qp_db(dev, qp, pd); ocrdma_create_qp() 1385 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset, ocrdma_create_qp() 1392 qp->ibqp.qp_num = qp->id; ocrdma_create_qp() 1394 return &qp->ibqp; ocrdma_create_qp() 1397 ocrdma_del_qpn_map(dev, qp); ocrdma_create_qp() 1399 ocrdma_mbx_destroy_qp(dev, qp); ocrdma_create_qp() 1402 kfree(qp->wqe_wr_id_tbl); ocrdma_create_qp() 1403 kfree(qp->rqe_wr_id_tbl); ocrdma_create_qp() 1404 kfree(qp); ocrdma_create_qp() 1414 struct ocrdma_qp *qp; _ocrdma_modify_qp() local 1418 qp = get_ocrdma_qp(ibqp); _ocrdma_modify_qp() 1421 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps); _ocrdma_modify_qp() 1427 status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask); _ocrdma_modify_qp() 1437 struct ocrdma_qp *qp; ocrdma_modify_qp() local 1441 qp = get_ocrdma_qp(ibqp); ocrdma_modify_qp() 1447 spin_lock_irqsave(&qp->q_lock, flags); ocrdma_modify_qp() 1448 old_qps = get_ibqp_state(qp->state); ocrdma_modify_qp() 1453 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_modify_qp() 1459 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type, ocrdma_modify_qp() 1508 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); ocrdma_query_qp() local 1513 status = ocrdma_mbx_query_qp(dev, qp, &params); ocrdma_query_qp() 1517 if (qp->qp_type == IB_QPT_UD) ocrdma_query_qp() 1529 qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags); ocrdma_query_qp() 1530 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1; ocrdma_query_qp() 1531 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1; ocrdma_query_qp() 1532 qp_attr->cap.max_send_sge = qp->sq.max_sges; ocrdma_query_qp() 1533 qp_attr->cap.max_recv_sge = qp->rq.max_sges; ocrdma_query_qp() 1534 qp_attr->cap.max_inline_data = qp->max_inline_data; ocrdma_query_qp() 1540 qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx; ocrdma_query_qp() 1583 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL); ocrdma_query_qp() 1601 static int is_hw_sq_empty(struct ocrdma_qp *qp) is_hw_sq_empty() argument 1603 return (qp->sq.tail == qp->sq.head); is_hw_sq_empty() 1606 static int is_hw_rq_empty(struct ocrdma_qp *qp) is_hw_rq_empty() argument 1608 return (qp->rq.tail == qp->rq.head); is_hw_rq_empty() 1633 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) ocrdma_discard_cqes() argument 1645 * find the matching CQE for a given qp, ocrdma_discard_cqes() 1655 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp))) ocrdma_discard_cqes() 1665 /* check for matching qp */ ocrdma_discard_cqes() 1666 if (qpn == 0 || qpn != qp->id) ocrdma_discard_cqes() 1670 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_discard_cqes() 1672 if (qp->srq) { ocrdma_discard_cqes() 1675 qp->srq->rq.max_wqe_idx; ocrdma_discard_cqes() 1678 spin_lock_irqsave(&qp->srq->q_lock, flags); ocrdma_discard_cqes() 1679 ocrdma_hwq_inc_tail(&qp->srq->rq); ocrdma_discard_cqes() 1680 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1); ocrdma_discard_cqes() 1681 spin_unlock_irqrestore(&qp->srq->q_lock, flags); ocrdma_discard_cqes() 1684 ocrdma_hwq_inc_tail(&qp->rq); ocrdma_discard_cqes() 1698 void ocrdma_del_flush_qp(struct ocrdma_qp *qp) ocrdma_del_flush_qp() argument 1702 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_del_flush_qp() 1706 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); ocrdma_del_flush_qp() 1708 list_del(&qp->sq_entry); ocrdma_del_flush_qp() 1709 if (!qp->srq) { ocrdma_del_flush_qp() 1710 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp); ocrdma_del_flush_qp() 1712 list_del(&qp->rq_entry); ocrdma_del_flush_qp() 1720 struct ocrdma_qp *qp; ocrdma_destroy_qp() local 1726 qp = get_ocrdma_qp(ibqp); ocrdma_destroy_qp() 1729 pd = qp->pd; ocrdma_destroy_qp() 1732 if (qp->state != OCRDMA_QPS_RST) { ocrdma_destroy_qp() 1742 (void) ocrdma_mbx_destroy_qp(dev, qp); ocrdma_destroy_qp() 1748 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags); ocrdma_destroy_qp() 1749 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) ocrdma_destroy_qp() 1750 spin_lock(&qp->rq_cq->cq_lock); ocrdma_destroy_qp() 1752 ocrdma_del_qpn_map(dev, qp); ocrdma_destroy_qp() 1754 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) ocrdma_destroy_qp() 1755 spin_unlock(&qp->rq_cq->cq_lock); ocrdma_destroy_qp() 1756 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags); ocrdma_destroy_qp() 1759 ocrdma_discard_cqes(qp, qp->sq_cq); ocrdma_destroy_qp() 1760 ocrdma_discard_cqes(qp, qp->rq_cq); ocrdma_destroy_qp() 1765 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, ocrdma_destroy_qp() 1766 PAGE_ALIGN(qp->sq.len)); ocrdma_destroy_qp() 1767 if (!qp->srq) ocrdma_destroy_qp() 1768 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, ocrdma_destroy_qp() 1769 PAGE_ALIGN(qp->rq.len)); ocrdma_destroy_qp() 1772 ocrdma_del_flush_qp(qp); ocrdma_destroy_qp() 1774 kfree(qp->wqe_wr_id_tbl); ocrdma_destroy_qp() 1775 kfree(qp->rqe_wr_id_tbl); ocrdma_destroy_qp() 1776 kfree(qp); ocrdma_destroy_qp() 1924 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, ocrdma_build_ud_hdr() argument 1933 if (qp->qp_type == IB_QPT_GSI) ocrdma_build_ud_hdr() 1934 ud_hdr->qkey = qp->qkey; ocrdma_build_ud_hdr() 1969 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, ocrdma_build_inline_sges() argument 1977 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) { ocrdma_build_inline_sges() 1979 if (unlikely(hdr->total_len > qp->max_inline_data)) { ocrdma_build_inline_sges() 1982 qp->max_inline_data, hdr->total_len); ocrdma_build_inline_sges() 2009 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, ocrdma_build_send() argument 2016 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { ocrdma_build_send() 2017 ocrdma_build_ud_hdr(qp, hdr, wr); ocrdma_build_send() 2024 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); ocrdma_build_send() 2028 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, ocrdma_build_write() argument 2036 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); ocrdma_build_write() 2046 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, ocrdma_build_read() argument 2110 static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, ocrdma_build_fr() argument 2116 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_build_fr() 2154 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp) ocrdma_ring_sq_db() argument 2156 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT); ocrdma_ring_sq_db() 2158 iowrite32(val, qp->sq_db); ocrdma_ring_sq_db() 2165 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); ocrdma_post_send() local 2169 spin_lock_irqsave(&qp->q_lock, flags); ocrdma_post_send() 2170 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) { ocrdma_post_send() 2171 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_post_send() 2177 if (qp->qp_type == IB_QPT_UD && ocrdma_post_send() 2184 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || ocrdma_post_send() 2185 wr->num_sge > qp->sq.max_sges) { ocrdma_post_send() 2190 hdr = ocrdma_hwq_head(&qp->sq); ocrdma_post_send() 2192 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled) ocrdma_post_send() 2207 ocrdma_build_send(qp, hdr, wr); ocrdma_post_send() 2213 status = ocrdma_build_send(qp, hdr, wr); ocrdma_post_send() 2220 status = ocrdma_build_write(qp, hdr, wr); ocrdma_post_send() 2223 ocrdma_build_read(qp, hdr, wr); ocrdma_post_send() 2234 status = ocrdma_build_fr(qp, hdr, wr); ocrdma_post_send() 2244 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled) ocrdma_post_send() 2245 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1; ocrdma_post_send() 2247 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0; ocrdma_post_send() 2248 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id; ocrdma_post_send() 2254 ocrdma_ring_sq_db(qp); ocrdma_post_send() 2257 ocrdma_hwq_inc_head(&qp->sq); ocrdma_post_send() 2260 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_post_send() 2264 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) ocrdma_ring_rq_db() argument 2266 u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT); ocrdma_ring_rq_db() 2268 iowrite32(val, qp->rq_db); ocrdma_ring_rq_db() 2297 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); ocrdma_post_recv() local 2300 spin_lock_irqsave(&qp->q_lock, flags); ocrdma_post_recv() 2301 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) { ocrdma_post_recv() 2302 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_post_recv() 2307 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 || ocrdma_post_recv() 2308 wr->num_sge > qp->rq.max_sges) { ocrdma_post_recv() 2313 rqe = ocrdma_hwq_head(&qp->rq); ocrdma_post_recv() 2316 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id; ocrdma_post_recv() 2321 ocrdma_ring_rq_db(qp); ocrdma_post_recv() 2324 ocrdma_hwq_inc_head(&qp->rq); ocrdma_post_recv() 2327 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_post_recv() 2475 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc, ocrdma_update_wc() argument 2482 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx); ocrdma_update_wc() 2484 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid; ocrdma_update_wc() 2513 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp, ocrdma_set_cqe_status_flushed() argument 2525 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { ocrdma_set_cqe_status_flushed() 2546 struct ocrdma_qp *qp, int status) ocrdma_update_err_cqe() 2551 ibwc->qp = &qp->ibqp; ocrdma_update_err_cqe() 2554 ocrdma_flush_qp(qp); ocrdma_update_err_cqe() 2555 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL); ocrdma_update_err_cqe() 2560 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) { ocrdma_update_err_cqe() 2562 ocrdma_set_cqe_status_flushed(qp, cqe); ocrdma_update_err_cqe() 2568 struct ocrdma_qp *qp, int status) ocrdma_update_err_rcqe() 2571 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; ocrdma_update_err_rcqe() 2572 ocrdma_hwq_inc_tail(&qp->rq); ocrdma_update_err_rcqe() 2574 return ocrdma_update_err_cqe(ibwc, cqe, qp, status); ocrdma_update_err_rcqe() 2578 struct ocrdma_qp *qp, int status) ocrdma_update_err_scqe() 2580 ocrdma_update_wc(qp, ibwc, qp->sq.tail); ocrdma_update_err_scqe() 2581 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_update_err_scqe() 2583 return ocrdma_update_err_cqe(ibwc, cqe, qp, status); ocrdma_update_err_scqe() 2587 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, ocrdma_poll_err_scqe() argument 2592 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_poll_err_scqe() 2601 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) { ocrdma_poll_err_scqe() 2605 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { ocrdma_poll_err_scqe() 2608 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); ocrdma_poll_err_scqe() 2619 } else if (is_hw_sq_empty(qp)) { ocrdma_poll_err_scqe() 2626 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); ocrdma_poll_err_scqe() 2631 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp, ocrdma_poll_success_scqe() argument 2636 int tail = qp->sq.tail; ocrdma_poll_success_scqe() 2639 if (!qp->wqe_wr_id_tbl[tail].signaled) { ocrdma_poll_success_scqe() 2644 ibwc->qp = &qp->ibqp; ocrdma_poll_success_scqe() 2645 ocrdma_update_wc(qp, ibwc, tail); ocrdma_poll_success_scqe() 2649 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx; ocrdma_poll_success_scqe() 2653 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_poll_success_scqe() 2657 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, ocrdma_poll_scqe() argument 2667 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled); ocrdma_poll_scqe() 2669 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop); ocrdma_poll_scqe() 2691 struct ocrdma_qp *qp) ocrdma_update_free_srq_cqe() 2697 srq = get_ocrdma_srq(qp->ibqp.srq); ocrdma_update_free_srq_cqe() 2710 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, ocrdma_poll_err_rcqe() argument 2715 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_poll_err_rcqe() 2723 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) { ocrdma_poll_err_rcqe() 2724 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) { ocrdma_poll_err_rcqe() 2727 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status); ocrdma_poll_err_rcqe() 2733 } else if (is_hw_rq_empty(qp)) { ocrdma_poll_err_rcqe() 2740 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); ocrdma_poll_err_rcqe() 2745 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp, ocrdma_poll_success_rcqe() argument 2749 ibwc->qp = &qp->ibqp; ocrdma_poll_success_rcqe() 2752 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) ocrdma_poll_success_rcqe() 2768 if (qp->ibqp.srq) { ocrdma_poll_success_rcqe() 2769 ocrdma_update_free_srq_cqe(ibwc, cqe, qp); ocrdma_poll_success_rcqe() 2771 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; ocrdma_poll_success_rcqe() 2772 ocrdma_hwq_inc_tail(&qp->rq); ocrdma_poll_success_rcqe() 2776 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, ocrdma_poll_rcqe() argument 2783 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) { ocrdma_poll_rcqe() 2794 ocrdma_poll_success_rcqe(qp, cqe, ibwc); ocrdma_poll_rcqe() 2796 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop, ocrdma_poll_rcqe() 2821 struct ocrdma_qp *qp = NULL; ocrdma_poll_hwcq() local 2836 qp = dev->qp_tbl[qpn]; ocrdma_poll_hwcq() 2837 BUG_ON(qp == NULL); ocrdma_poll_hwcq() 2840 expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled, ocrdma_poll_hwcq() 2843 expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled, ocrdma_poll_hwcq() 2883 struct ocrdma_qp *qp, struct ib_wc *ibwc) ocrdma_add_err_cqe() 2888 if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp)) ocrdma_add_err_cqe() 2890 if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) { ocrdma_add_err_cqe() 2891 ocrdma_update_wc(qp, ibwc, qp->sq.tail); ocrdma_add_err_cqe() 2892 ocrdma_hwq_inc_tail(&qp->sq); ocrdma_add_err_cqe() 2893 } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) { ocrdma_add_err_cqe() 2894 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail]; ocrdma_add_err_cqe() 2895 ocrdma_hwq_inc_tail(&qp->rq); ocrdma_add_err_cqe() 2914 struct ocrdma_qp *qp; ocrdma_poll_cq() local 2925 /* adapter returns single error cqe when qp moves to ocrdma_poll_cq() 2931 list_for_each_entry(qp, &cq->sq_head, sq_entry) { ocrdma_poll_cq() 2934 err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc); ocrdma_poll_cq() 2545 ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status) ocrdma_update_err_cqe() argument 2567 ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status) ocrdma_update_err_rcqe() argument 2577 ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp, int status) ocrdma_update_err_scqe() argument 2689 ocrdma_update_free_srq_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe, struct ocrdma_qp *qp) ocrdma_update_free_srq_cqe() argument 2882 ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries, struct ocrdma_qp *qp, struct ib_wc *ibwc) ocrdma_add_err_cqe() argument
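ocrdma's work queues are plain head/tail rings: posts advance head, completions advance tail, and helpers like is_hw_sq_empty() and ocrdma_hwq_free_cnt() are pure pointer arithmetic. A sketch using free-running counters over a power-of-two ring, one common way to implement exactly these helpers (ocrdma itself wraps its indices at the queue size, but the arithmetic contract is the same):

    struct hwq {
        unsigned head;   /* advanced by post_send/post_recv    */
        unsigned tail;   /* advanced as completions are polled */
        unsigned max;    /* ring entries, power of two         */
    };

    static int hwq_empty(const struct hwq *q) { return q->head == q->tail; }

    static unsigned hwq_free_cnt(const struct hwq *q)
    {
        return q->max - (q->head - q->tail);   /* unsigned wrap is fine */
    }

    static unsigned hwq_slot(const struct hwq *q, unsigned counter)
    {
        return counter & (q->max - 1);         /* index into the ring   */
    }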
|
H A D | ocrdma_hw.c | 644 struct ocrdma_qp *qp) ocrdma_process_qpcat_error() 649 if (qp == NULL) ocrdma_process_qpcat_error() 651 ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps); ocrdma_process_qpcat_error() 657 struct ocrdma_qp *qp = NULL; ocrdma_dispatch_ibevent() local 668 qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK]; ocrdma_dispatch_ibevent() 690 ib_evt.element.qp = &qp->ibqp; ocrdma_dispatch_ibevent() 692 ocrdma_process_qpcat_error(dev, qp); ocrdma_dispatch_ibevent() 695 ib_evt.element.qp = &qp->ibqp; ocrdma_dispatch_ibevent() 699 ib_evt.element.qp = &qp->ibqp; ocrdma_dispatch_ibevent() 703 ib_evt.element.qp = &qp->ibqp; ocrdma_dispatch_ibevent() 713 ib_evt.element.srq = &qp->srq->ibsrq; ocrdma_dispatch_ibevent() 719 ib_evt.element.srq = &qp->srq->ibsrq; ocrdma_dispatch_ibevent() 725 ib_evt.element.qp = &qp->ibqp; ocrdma_dispatch_ibevent() 741 if (qp->ibqp.event_handler) ocrdma_dispatch_ibevent() 742 qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context); ocrdma_dispatch_ibevent() 747 if (qp->srq->ibsrq.event_handler) ocrdma_dispatch_ibevent() 748 qp->srq->ibsrq.event_handler(&ib_evt, ocrdma_dispatch_ibevent() 749 qp->srq->ibsrq. ocrdma_dispatch_ibevent() 840 struct ocrdma_qp *qp; _ocrdma_qp_buddy_cq_handler() local 847 qp = list_entry(cur, struct ocrdma_qp, sq_entry); list_for_each() 849 qp = list_entry(cur, struct ocrdma_qp, rq_entry); list_for_each() 851 if (qp->srq) list_for_each() 856 if (qp->sq_cq == qp->rq_cq) list_for_each() 861 if (qp->sq_cq == cq) list_for_each() 862 bcq = qp->rq_cq; list_for_each() 864 bcq = qp->sq_cq; list_for_each() 2019 bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp) ocrdma_is_qp_in_sq_flushlist() argument 2024 if (qp == tmp) { ocrdma_is_qp_in_sq_flushlist() 2032 bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp) ocrdma_is_qp_in_rq_flushlist() argument 2037 if (qp == tmp) { ocrdma_is_qp_in_rq_flushlist() 2045 void ocrdma_flush_qp(struct ocrdma_qp *qp) ocrdma_flush_qp() argument 2049 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_flush_qp() 2052 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); ocrdma_flush_qp() 2054 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head); ocrdma_flush_qp() 2055 if (!qp->srq) { ocrdma_flush_qp() 2056 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp); ocrdma_flush_qp() 2058 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head); ocrdma_flush_qp() 2063 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp) ocrdma_init_hwq_ptr() argument 2065 qp->sq.head = 0; ocrdma_init_hwq_ptr() 2066 qp->sq.tail = 0; ocrdma_init_hwq_ptr() 2067 qp->rq.head = 0; ocrdma_init_hwq_ptr() 2068 qp->rq.tail = 0; ocrdma_init_hwq_ptr() 2071 int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state, ocrdma_qp_state_change() argument 2080 spin_lock_irqsave(&qp->q_lock, flags); ocrdma_qp_state_change() 2083 *old_ib_state = get_ibqp_state(qp->state); ocrdma_qp_state_change() 2084 if (new_state == qp->state) { ocrdma_qp_state_change() 2085 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_qp_state_change() 2091 ocrdma_init_hwq_ptr(qp); ocrdma_qp_state_change() 2092 ocrdma_del_flush_qp(qp); ocrdma_qp_state_change() 2094 ocrdma_flush_qp(qp); ocrdma_qp_state_change() 2097 qp->state = new_state; ocrdma_qp_state_change() 2099 spin_unlock_irqrestore(&qp->q_lock, flags); ocrdma_qp_state_change() 2103 static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp) ocrdma_set_create_qp_mbx_access_flags() argument 2106 if (qp->cap_flags & OCRDMA_QP_INB_RD) ocrdma_set_create_qp_mbx_access_flags() 2108 if (qp->cap_flags & OCRDMA_QP_INB_WR) ocrdma_set_create_qp_mbx_access_flags() 2110 if (qp->cap_flags & OCRDMA_QP_MW_BIND) ocrdma_set_create_qp_mbx_access_flags() 2112 if (qp->cap_flags & OCRDMA_QP_LKEY0) ocrdma_set_create_qp_mbx_access_flags() 2114 if (qp->cap_flags & OCRDMA_QP_FAST_REG) ocrdma_set_create_qp_mbx_access_flags() 2121 struct ocrdma_qp *qp) ocrdma_set_create_qp_sq_cmd() 2126 struct ocrdma_pd *pd = qp->pd; ocrdma_set_create_qp_sq_cmd() 2143 qp->sq.max_cnt = max_wqe_allocated; ocrdma_set_create_qp_sq_cmd() 2146 qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); ocrdma_set_create_qp_sq_cmd() 2147 if (!qp->sq.va) ocrdma_set_create_qp_sq_cmd() 2149 memset(qp->sq.va, 0, len); ocrdma_set_create_qp_sq_cmd() 2150 qp->sq.len = len; ocrdma_set_create_qp_sq_cmd() 2151 qp->sq.pa = pa; ocrdma_set_create_qp_sq_cmd() 2152 qp->sq.entry_size = dev->attr.wqe_size; ocrdma_set_create_qp_sq_cmd() 2166 cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) << ocrdma_set_create_qp_sq_cmd() 2177 struct ocrdma_qp *qp) ocrdma_set_create_qp_rq_cmd() 2182 struct ocrdma_pd *pd = qp->pd; ocrdma_set_create_qp_rq_cmd() 2194 qp->rq.max_cnt = max_rqe_allocated; ocrdma_set_create_qp_rq_cmd() 2197 qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); ocrdma_set_create_qp_rq_cmd() 2198 if (!qp->rq.va) ocrdma_set_create_qp_rq_cmd() 2200 memset(qp->rq.va, 0, len); ocrdma_set_create_qp_rq_cmd() 2201 qp->rq.pa = pa; ocrdma_set_create_qp_rq_cmd() 2202 qp->rq.len = len; ocrdma_set_create_qp_rq_cmd() 2203 qp->rq.entry_size = dev->attr.rqe_size; ocrdma_set_create_qp_rq_cmd() 2214 cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) << ocrdma_set_create_qp_rq_cmd() 2225 struct ocrdma_qp *qp, ocrdma_set_create_qp_dpp_cmd() 2229 qp->dpp_enabled = true; ocrdma_set_create_qp_dpp_cmd() 2240 struct ocrdma_qp *qp) ocrdma_set_create_qp_ird_cmd() 2242 struct ocrdma_pd *pd = qp->pd; ocrdma_set_create_qp_ird_cmd() 2254 qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, ocrdma_set_create_qp_ird_cmd() 2256 if (!qp->ird_q_va) ocrdma_set_create_qp_ird_cmd() 2258 memset(qp->ird_q_va, 0, ird_q_len); ocrdma_set_create_qp_ird_cmd() 2262 rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va + ocrdma_set_create_qp_ird_cmd() 2274 struct ocrdma_qp *qp, ocrdma_get_create_qp_rsp() 2279 qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK; ocrdma_get_create_qp_rsp() 2280 qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK; ocrdma_get_create_qp_rsp() 2281 qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT; ocrdma_get_create_qp_rsp() 2282 qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK; ocrdma_get_create_qp_rsp() 2283 qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT); ocrdma_get_create_qp_rsp() 2284 qp->dpp_enabled = false; ocrdma_get_create_qp_rsp() 2286 qp->dpp_enabled = true; ocrdma_get_create_qp_rsp() 2299 qp->sq.max_cnt = max_wqe_allocated; ocrdma_get_create_qp_rsp() 2300 qp->sq.max_wqe_idx = max_wqe_allocated - 1; ocrdma_get_create_qp_rsp() 2303 qp->rq.max_cnt = max_rqe_allocated; ocrdma_get_create_qp_rsp() 2304 qp->rq.max_wqe_idx = max_rqe_allocated - 1; ocrdma_get_create_qp_rsp() 2308 int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, ocrdma_mbx_create_qp() argument 2314 struct ocrdma_pd *pd = qp->pd; ocrdma_mbx_create_qp() 2341 status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp); ocrdma_mbx_create_qp() 2349 qp->srq = srq; ocrdma_mbx_create_qp() 2351 status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp); ocrdma_mbx_create_qp() 2356 status = ocrdma_set_create_qp_ird_cmd(cmd, qp); ocrdma_mbx_create_qp() 2363 flags = ocrdma_set_create_qp_mbx_access_flags(qp); ocrdma_mbx_create_qp() 2375 qp->sq_cq = cq; ocrdma_mbx_create_qp() 2379 qp->rq_cq = cq; ocrdma_mbx_create_qp() 2383 ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, ocrdma_mbx_create_qp() 2391 ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt); ocrdma_mbx_create_qp() 2392 qp->state = OCRDMA_QPS_RST; ocrdma_mbx_create_qp() 2396 if (qp->rq.va) ocrdma_mbx_create_qp() 2397 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); ocrdma_mbx_create_qp() 2400 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); ocrdma_mbx_create_qp() 2407 int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, ocrdma_mbx_query_qp() argument 2417 cmd->qp_id = qp->id; ocrdma_mbx_query_qp() 2428 static int ocrdma_set_av_params(struct ocrdma_qp *qp, ocrdma_set_av_params() argument 2438 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_set_av_params() 2463 qp->sgid_idx = ah_attr->grh.sgid_index; ocrdma_set_av_params() 2495 static int ocrdma_set_qp_params(struct ocrdma_qp *qp, ocrdma_set_qp_params() argument 2500 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); ocrdma_set_qp_params() 2508 qp->qkey = attrs->qkey; ocrdma_set_qp_params() 2513 status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask); ocrdma_set_qp_params() 2516 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { ocrdma_set_qp_params() 2586 qp->max_ord = attrs->max_rd_atomic; ocrdma_set_qp_params() 2594 qp->max_ird = attrs->max_dest_rd_atomic; ocrdma_set_qp_params() 2597 cmd->params.max_ord_ird = (qp->max_ord << ocrdma_set_qp_params() 2599 (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK); ocrdma_set_qp_params() 2604 int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, ocrdma_mbx_modify_qp() argument 2614 cmd->params.id = qp->id; ocrdma_mbx_modify_qp() 2624 (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) & ocrdma_mbx_modify_qp() 2628 status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask); ocrdma_mbx_modify_qp() 2640 int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp) ocrdma_mbx_destroy_qp() argument 2649 cmd->qp_id = qp->id; ocrdma_mbx_destroy_qp() 2656 if (qp->sq.va) ocrdma_mbx_destroy_qp() 2657 dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); ocrdma_mbx_destroy_qp() 2658 if (!qp->srq && qp->rq.va) ocrdma_mbx_destroy_qp() 2659 dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa); ocrdma_mbx_destroy_qp() 2660 if (qp->dpp_enabled) ocrdma_mbx_destroy_qp() 2661 qp->pd->num_dpp_qp++; ocrdma_mbx_destroy_qp() 643 ocrdma_process_qpcat_error(struct ocrdma_dev *dev, struct ocrdma_qp *qp) ocrdma_process_qpcat_error() argument 2119 ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, struct ib_qp_init_attr *attrs, struct ocrdma_qp *qp) ocrdma_set_create_qp_sq_cmd() argument 2175 ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, struct ib_qp_init_attr *attrs, struct ocrdma_qp *qp) ocrdma_set_create_qp_rq_cmd() argument 2223 ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd, struct ocrdma_pd *pd, struct ocrdma_qp *qp, u8 enable_dpp_cq, u16 dpp_cq_id) ocrdma_set_create_qp_dpp_cmd() argument 2239 ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, struct ocrdma_qp *qp) ocrdma_set_create_qp_ird_cmd() argument 2273 ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp, struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, u16 *dpp_offset, u16 *dpp_credit_lmt) ocrdma_get_create_qp_rsp() argument
|
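
The ocrdma_qp_state_change() excerpt above is the driver's flush discipline in miniature: a transition back to RST rewinds the software queue pointers and drops the QP from any CQ flush list, while a transition into the error state registers the QP on its CQs' flush lists so outstanding work can be completed in software. A condensed sketch using only the helpers shown above (OCRDMA_QPS_ERR is assumed to be the error-state counterpart of the OCRDMA_QPS_RST constant visible in the listing):

    /* Condensed from ocrdma_qp_state_change(). */
    spin_lock_irqsave(&qp->q_lock, flags);
    if (old_ib_state)
            *old_ib_state = get_ibqp_state(qp->state);
    if (new_state == qp->state) {                /* no-op transition */
            spin_unlock_irqrestore(&qp->q_lock, flags);
            return 1;
    }
    if (new_state == OCRDMA_QPS_RST) {
            ocrdma_init_hwq_ptr(qp);             /* sq/rq head = tail = 0 */
            ocrdma_del_flush_qp(qp);             /* nothing left to flush */
    } else if (new_state == OCRDMA_QPS_ERR) {    /* assumed constant */
            ocrdma_flush_qp(qp);                 /* join both CQ flush lists */
    }
    qp->state = new_state;
    spin_unlock_irqrestore(&qp->q_lock, flags);
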
H A D | ocrdma_main.c | 566 struct ocrdma_qp *qp, **cur_qp; ocrdma_close() local 576 qp = cur_qp[i]; ocrdma_close() 577 if (qp && qp->ibqp.qp_type != IB_QPT_GSI) { ocrdma_close() 579 _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask); ocrdma_close() 582 err_event.element.qp = &qp->ibqp; ocrdma_close()
|
H A D | ocrdma_verbs.h | 75 void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
|
H A D | ocrdma.h | 322 /* head of all qp's sq and rq for which cqes need to be flushed */
|
/linux-4.1.27/drivers/infiniband/core/ |
H A D | verbs.c | 377 struct ib_qp *qp = context; __ib_shared_qp_event_handler() local 380 spin_lock_irqsave(&qp->device->event_handler_lock, flags); __ib_shared_qp_event_handler() 381 list_for_each_entry(event->element.qp, &qp->open_list, open_list) __ib_shared_qp_event_handler() 382 if (event->element.qp->event_handler) __ib_shared_qp_event_handler() 383 event->element.qp->event_handler(event, event->element.qp->qp_context); __ib_shared_qp_event_handler() 384 spin_unlock_irqrestore(&qp->device->event_handler_lock, flags); __ib_shared_qp_event_handler() 387 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) __ib_insert_xrcd_qp() argument 390 list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); __ib_insert_xrcd_qp() 398 struct ib_qp *qp; __ib_open_qp() local 401 qp = kzalloc(sizeof *qp, GFP_KERNEL); __ib_open_qp() 402 if (!qp) __ib_open_qp() 405 qp->real_qp = real_qp; __ib_open_qp() 407 qp->device = real_qp->device; __ib_open_qp() 408 qp->event_handler = event_handler; __ib_open_qp() 409 qp->qp_context = qp_context; __ib_open_qp() 410 qp->qp_num = real_qp->qp_num; __ib_open_qp() 411 qp->qp_type = real_qp->qp_type; __ib_open_qp() 414 list_add(&qp->open_list, &real_qp->open_list); __ib_open_qp() 417 return qp; __ib_open_qp() 423 struct ib_qp *qp, *real_qp; ib_open_qp() local 428 qp = ERR_PTR(-EINVAL); ib_open_qp() 432 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, ib_open_qp() 438 return qp; ib_open_qp() 445 struct ib_qp *qp, *real_qp; ib_create_qp() local 449 qp = device->create_qp(pd, qp_init_attr, NULL); ib_create_qp() 451 if (!IS_ERR(qp)) { ib_create_qp() 452 qp->device = device; ib_create_qp() 453 qp->real_qp = qp; ib_create_qp() 454 qp->uobject = NULL; ib_create_qp() 455 qp->qp_type = qp_init_attr->qp_type; ib_create_qp() 457 atomic_set(&qp->usecnt, 0); ib_create_qp() 459 qp->event_handler = __ib_shared_qp_event_handler; ib_create_qp() 460 qp->qp_context = qp; ib_create_qp() 461 qp->pd = NULL; ib_create_qp() 462 qp->send_cq = qp->recv_cq = NULL; ib_create_qp() 463 qp->srq = NULL; ib_create_qp() 464 qp->xrcd = qp_init_attr->xrcd; ib_create_qp() 466 INIT_LIST_HEAD(&qp->open_list); ib_create_qp() 468 real_qp = qp; ib_create_qp() 469 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, ib_create_qp() 471 if (!IS_ERR(qp)) ib_create_qp() 476 qp->event_handler = qp_init_attr->event_handler; ib_create_qp() 477 qp->qp_context = qp_init_attr->qp_context; ib_create_qp() 479 qp->recv_cq = NULL; ib_create_qp() 480 qp->srq = NULL; ib_create_qp() 482 qp->recv_cq = qp_init_attr->recv_cq; ib_create_qp() 484 qp->srq = qp_init_attr->srq; ib_create_qp() 485 if (qp->srq) ib_create_qp() 489 qp->pd = pd; ib_create_qp() 490 qp->send_cq = qp_init_attr->send_cq; ib_create_qp() 491 qp->xrcd = NULL; ib_create_qp() 498 return qp; ib_create_qp() 867 int ib_resolve_eth_l2_attrs(struct ib_qp *qp, ib_resolve_eth_l2_attrs() argument 874 (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) { ib_resolve_eth_l2_attrs() 875 ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num, ib_resolve_eth_l2_attrs() 903 int ib_modify_qp(struct ib_qp *qp, ib_modify_qp() argument 909 ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask); ib_modify_qp() 913 return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); ib_modify_qp() 917 int ib_query_qp(struct ib_qp *qp, ib_query_qp() argument 922 return qp->device->query_qp ? 
ib_query_qp() 923 qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : ib_query_qp() 928 int ib_close_qp(struct ib_qp *qp) ib_close_qp() argument 933 real_qp = qp->real_qp; ib_close_qp() 934 if (real_qp == qp) ib_close_qp() 938 list_del(&qp->open_list); ib_close_qp() 942 kfree(qp); ib_close_qp() 948 static int __ib_destroy_shared_qp(struct ib_qp *qp) __ib_destroy_shared_qp() argument 954 real_qp = qp->real_qp; __ib_destroy_shared_qp() 958 ib_close_qp(qp); __ib_destroy_shared_qp() 976 int ib_destroy_qp(struct ib_qp *qp) ib_destroy_qp() argument 983 if (atomic_read(&qp->usecnt)) ib_destroy_qp() 986 if (qp->real_qp != qp) ib_destroy_qp() 987 return __ib_destroy_shared_qp(qp); ib_destroy_qp() 989 pd = qp->pd; ib_destroy_qp() 990 scq = qp->send_cq; ib_destroy_qp() 991 rcq = qp->recv_cq; ib_destroy_qp() 992 srq = qp->srq; ib_destroy_qp() 994 ret = qp->device->destroy_qp(qp); ib_destroy_qp() 1344 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) ib_attach_mcast() argument 1348 if (!qp->device->attach_mcast) ib_attach_mcast() 1350 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) ib_attach_mcast() 1353 ret = qp->device->attach_mcast(qp, gid, lid); ib_attach_mcast() 1355 atomic_inc(&qp->usecnt); ib_attach_mcast() 1360 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) ib_detach_mcast() argument 1364 if (!qp->device->detach_mcast) ib_detach_mcast() 1366 if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) ib_detach_mcast() 1369 ret = qp->device->detach_mcast(qp, gid, lid); ib_detach_mcast() 1371 atomic_dec(&qp->usecnt); ib_detach_mcast() 1398 struct ib_qp *qp; ib_dealloc_xrcd() local 1405 qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); ib_dealloc_xrcd() 1406 ret = ib_destroy_qp(qp); ib_dealloc_xrcd() 1415 struct ib_flow *ib_create_flow(struct ib_qp *qp, ib_create_flow() argument 1420 if (!qp->device->create_flow) ib_create_flow() 1423 flow_id = qp->device->create_flow(qp, flow_attr, domain); ib_create_flow() 1425 atomic_inc(&qp->usecnt); ib_create_flow() 1433 struct ib_qp *qp = flow_id->qp; ib_destroy_flow() local 1435 err = qp->device->destroy_flow(flow_id); ib_destroy_flow() 1437 atomic_dec(&qp->usecnt); ib_destroy_flow()
|
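
__ib_open_qp() above is the heart of XRC target QP sharing: each ib_open_qp() caller gets a lightweight struct ib_qp alias whose verbs are forwarded through real_qp, and events on the real QP fan out over open_list in __ib_shared_qp_event_handler(). A sketch of the alias construction, condensed from the listing (the real code also takes event_handler_lock around the list_add, elided here):

    /* Sketch of __ib_open_qp(): build a forwarding alias of real_qp. */
    struct ib_qp *qp = kzalloc(sizeof(*qp), GFP_KERNEL);

    if (!qp)
            return ERR_PTR(-ENOMEM);
    qp->real_qp = real_qp;           /* all verbs are forwarded here */
    qp->device = real_qp->device;
    qp->event_handler = event_handler;
    qp->qp_context = qp_context;
    qp->qp_num = real_qp->qp_num;    /* same wire-visible QPN */
    qp->qp_type = real_qp->qp_type;
    list_add(&qp->open_list, &real_qp->open_list);
    return qp;

ib_close_qp() later unlinks the alias and kfrees it; the underlying QP is only destroyed by __ib_destroy_shared_qp() once the open list has drained.
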
H A D | iwcm.c | 245 static int iwcm_modify_qp_err(struct ib_qp *qp) iwcm_modify_qp_err() argument 249 if (!qp) iwcm_modify_qp_err() 253 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); iwcm_modify_qp_err() 260 static int iwcm_modify_qp_sqd(struct ib_qp *qp) iwcm_modify_qp_sqd() argument 264 BUG_ON(qp == NULL); iwcm_modify_qp_sqd() 266 return ib_modify_qp(qp, &qp_attr, IB_QP_STATE); iwcm_modify_qp_sqd() 286 struct ib_qp *qp = NULL; iw_cm_disconnect() local 299 if (cm_id_priv->qp) iw_cm_disconnect() 300 qp = cm_id_priv->qp; iw_cm_disconnect() 325 if (qp) { iw_cm_disconnect() 327 ret = iwcm_modify_qp_err(qp); iw_cm_disconnect() 329 ret = iwcm_modify_qp_sqd(qp); iw_cm_disconnect() 374 (void)iwcm_modify_qp_err(cm_id_priv->qp); destroy_cm_id() 399 if (cm_id_priv->qp) { destroy_cm_id() 400 cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp); destroy_cm_id() 401 cm_id_priv->qp = NULL; destroy_cm_id() 516 struct ib_qp *qp; iw_cm_accept() local 531 qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); iw_cm_accept() 532 if (!qp) { iw_cm_accept() 538 cm_id->device->iwcm->add_ref(qp); iw_cm_accept() 539 cm_id_priv->qp = qp; iw_cm_accept() 548 if (cm_id_priv->qp) { iw_cm_accept() 549 cm_id->device->iwcm->rem_ref(qp); iw_cm_accept() 550 cm_id_priv->qp = NULL; iw_cm_accept() 573 struct ib_qp *qp; iw_cm_connect() local 592 qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); iw_cm_connect() 593 if (!qp) { iw_cm_connect() 599 cm_id->device->iwcm->add_ref(qp); iw_cm_connect() 600 cm_id_priv->qp = qp; iw_cm_connect() 607 if (cm_id_priv->qp) { iw_cm_connect() 608 cm_id->device->iwcm->rem_ref(qp); iw_cm_connect() 609 cm_id_priv->qp = NULL; iw_cm_connect() 761 cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp); cm_conn_rep_handler() 762 cm_id_priv->qp = NULL; cm_conn_rep_handler() 811 if (cm_id_priv->qp) { cm_close_handler() 812 cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp); cm_close_handler() 813 cm_id_priv->qp = NULL; cm_close_handler()
|
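
iw_cm_disconnect() above chooses between two QP transitions: an abortive close moves the QP to the error state, flushing all outstanding work, while a graceful close moves it to send-queue-drain. Both helpers reduce to a one-field ib_modify_qp() call; a sketch matching iwcm_modify_qp_err()/iwcm_modify_qp_sqd() (the IB_QPS_ERR/IB_QPS_SQD values are the standard verbs states, assumed rather than shown in the excerpt):

    /* Abortive teardown: flush everything outstanding. */
    static int modify_qp_err(struct ib_qp *qp)
    {
            struct ib_qp_attr qp_attr = { .qp_state = IB_QPS_ERR };

            return qp ? ib_modify_qp(qp, &qp_attr, IB_QP_STATE) : -EINVAL;
    }

    /* Graceful teardown: drain the send queue first. */
    static int modify_qp_sqd(struct ib_qp *qp)
    {
            struct ib_qp_attr qp_attr = { .qp_state = IB_QPS_SQD };

            return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
    }
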
H A D | uverbs_cmd.c | 252 static void put_qp_read(struct ib_qp *qp) put_qp_read() argument 254 put_uobj_read(qp->uobject); put_qp_read() 257 static void put_qp_write(struct ib_qp *qp) put_qp_write() argument 259 put_uobj_write(qp->uobject); put_qp_write() 1481 tmp.qp_num = wc->qp->qp_num; copy_wc_to_user() 1636 struct ib_qp *qp; ib_uverbs_create_qp() local 1720 qp = ib_create_qp(pd, &attr); ib_uverbs_create_qp() 1722 qp = device->create_qp(pd, &attr, &udata); ib_uverbs_create_qp() 1724 if (IS_ERR(qp)) { ib_uverbs_create_qp() 1725 ret = PTR_ERR(qp); ib_uverbs_create_qp() 1730 qp->real_qp = qp; ib_uverbs_create_qp() 1731 qp->device = device; ib_uverbs_create_qp() 1732 qp->pd = pd; ib_uverbs_create_qp() 1733 qp->send_cq = attr.send_cq; ib_uverbs_create_qp() 1734 qp->recv_cq = attr.recv_cq; ib_uverbs_create_qp() 1735 qp->srq = attr.srq; ib_uverbs_create_qp() 1736 qp->event_handler = attr.event_handler; ib_uverbs_create_qp() 1737 qp->qp_context = attr.qp_context; ib_uverbs_create_qp() 1738 qp->qp_type = attr.qp_type; ib_uverbs_create_qp() 1739 atomic_set(&qp->usecnt, 0); ib_uverbs_create_qp() 1747 qp->uobject = &obj->uevent.uobject; ib_uverbs_create_qp() 1749 obj->uevent.uobject.object = qp; ib_uverbs_create_qp() 1755 resp.qpn = qp->qp_num; ib_uverbs_create_qp() 1799 ib_destroy_qp(qp); ib_uverbs_create_qp() 1826 struct ib_qp *qp; ib_uverbs_open_qp() local 1862 qp = ib_open_qp(xrcd, &attr); ib_uverbs_open_qp() 1863 if (IS_ERR(qp)) { ib_uverbs_open_qp() 1864 ret = PTR_ERR(qp); ib_uverbs_open_qp() 1868 qp->uobject = &obj->uevent.uobject; ib_uverbs_open_qp() 1870 obj->uevent.uobject.object = qp; ib_uverbs_open_qp() 1876 resp.qpn = qp->qp_num; ib_uverbs_open_qp() 1903 ib_destroy_qp(qp); ib_uverbs_open_qp() 1917 struct ib_qp *qp; ib_uverbs_query_qp() local 1932 qp = idr_read_qp(cmd.qp_handle, file->ucontext); ib_uverbs_query_qp() 1933 if (!qp) { ib_uverbs_query_qp() 1938 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr); ib_uverbs_query_qp() 1940 put_qp_read(qp); ib_uverbs_query_qp() 2031 struct ib_qp *qp; ib_uverbs_modify_qp() local 2045 qp = idr_read_qp(cmd.qp_handle, file->ucontext); ib_uverbs_modify_qp() 2046 if (!qp) { ib_uverbs_modify_qp() 2097 if (qp->real_qp == qp) { ib_uverbs_modify_qp() 2098 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask); ib_uverbs_modify_qp() 2101 ret = qp->device->modify_qp(qp, attr, ib_uverbs_modify_qp() 2102 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); ib_uverbs_modify_qp() 2104 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); ib_uverbs_modify_qp() 2113 put_qp_read(qp); ib_uverbs_modify_qp() 2128 struct ib_qp *qp; ib_uverbs_destroy_qp() local 2140 qp = uobj->object; ib_uverbs_destroy_qp() 2148 ret = ib_destroy_qp(qp); ib_uverbs_destroy_qp() 2187 struct ib_qp *qp; ib_uverbs_post_send() local 2206 qp = idr_read_qp(cmd.qp_handle, file->ucontext); ib_uverbs_post_send() 2207 if (!qp) ib_uverbs_post_send() 2210 is_ud = qp->qp_type == IB_QPT_UD; ib_uverbs_post_send() 2317 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); ib_uverbs_post_send() 2330 put_qp_read(qp); ib_uverbs_post_send() 2438 struct ib_qp *qp; ib_uverbs_post_recv() local 2450 qp = idr_read_qp(cmd.qp_handle, file->ucontext); ib_uverbs_post_recv() 2451 if (!qp) ib_uverbs_post_recv() 2455 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); ib_uverbs_post_recv() 2457 put_qp_read(qp); ib_uverbs_post_recv() 2662 struct ib_qp *qp; ib_uverbs_attach_mcast() local 2670 qp = idr_write_qp(cmd.qp_handle, file->ucontext); ib_uverbs_attach_mcast() 2671 if (!qp) 
ib_uverbs_attach_mcast() 2674 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); ib_uverbs_attach_mcast() 2692 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid); ib_uverbs_attach_mcast() 2699 put_qp_write(qp); ib_uverbs_attach_mcast() 2710 struct ib_qp *qp; ib_uverbs_detach_mcast() local 2717 qp = idr_write_qp(cmd.qp_handle, file->ucontext); ib_uverbs_detach_mcast() 2718 if (!qp) ib_uverbs_detach_mcast() 2721 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid); ib_uverbs_detach_mcast() 2725 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject); ib_uverbs_detach_mcast() 2736 put_qp_write(qp); ib_uverbs_detach_mcast() 2794 struct ib_qp *qp; ib_uverbs_ex_create_flow() local 2855 qp = idr_read_qp(cmd.qp_handle, file->ucontext); ib_uverbs_ex_create_flow() 2856 if (!qp) { ib_uverbs_ex_create_flow() 2895 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER); ib_uverbs_ex_create_flow() 2900 flow_id->qp = qp; ib_uverbs_ex_create_flow() 2916 put_qp_read(qp); ib_uverbs_ex_create_flow() 2935 put_qp_read(qp); ib_uverbs_ex_create_flow()
|
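
Every QP-handle command in uverbs_cmd.c above follows one locking pattern: translate the user handle through the idr with a read (shared) reference for datapath and query commands, or a write (exclusive) reference when the uobject itself is mutated, as the multicast attach/detach paths do; the matching put_qp_read()/put_qp_write() ends the critical section. Condensed from ib_uverbs_post_send():

    qp = idr_read_qp(cmd.qp_handle, file->ucontext);  /* shared ref */
    if (!qp)
            goto out;
    is_ud = qp->qp_type == IB_QPT_UD;
    /* build the work-request list (elided) */
    ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
    put_qp_read(qp);                                  /* drop shared ref */
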
H A D | core_priv.h | 52 int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
|
H A D | mad.c | 327 if (!port_priv->qp_info[qpn].qp) { ib_register_mad_agent() 341 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd, ib_register_mad_agent() 364 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; ib_register_mad_agent() 532 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; ib_register_mad_snoop() 702 static void build_smp_wc(struct ib_qp *qp, build_smp_wc() argument 713 wc->qp = qp; build_smp_wc() 782 build_smp_wc(mad_agent_priv->agent.qp, handle_outgoing_dr_smp() 1100 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr, ib_send_mad() 1245 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, ib_redirect_mad_qp() argument 1982 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num)) ib_mad_recv_done_handler() 2031 qp_info->qp->qp_num); ib_mad_recv_done_handler() 2053 qp_info->qp->qp_num); ib_mad_recv_done_handler() 2070 port_priv->device, port_num, qp_info->qp->qp_num); ib_mad_recv_done_handler() 2249 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, ib_mad_send_done_handler() 2307 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr, mad_error_handler() 2321 ret = ib_modify_qp(qp_info->qp, attr, mad_error_handler() 2496 build_smp_wc(recv_mad_agent->agent.qp, local_completions() 2693 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr); ib_mad_post_receive_mads() 2723 if (!qp_info->qp) cleanup_recv_queue() 2757 struct ib_qp *qp; ib_mad_port_start() local 2773 qp = port_priv->qp_info[i].qp; ib_mad_port_start() 2774 if (!qp) ib_mad_port_start() 2783 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; ib_mad_port_start() 2784 ret = ib_modify_qp(qp, attr, IB_QP_STATE | ib_mad_port_start() 2794 ret = ib_modify_qp(qp, attr, IB_QP_STATE); ib_mad_port_start() 2804 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN); ib_mad_port_start() 2822 if (!port_priv->qp_info[i].qp) ib_mad_port_start() 2844 event->event, qp_info->qp->qp_num); qp_event_handler() 2887 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); create_mad_qp() 2888 if (IS_ERR(qp_info->qp)) { create_mad_qp() 2892 ret = PTR_ERR(qp_info->qp); create_mad_qp() 2906 if (!qp_info->qp) destroy_mad_qp() 2909 ib_destroy_qp(qp_info->qp); destroy_mad_qp()
|
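
ib_mad_port_start() above arms the two special QPs on each port with the same three-step modify sequence; the only per-QP difference is the qkey, zero for QP0 and the well-known IB_QP1_QKEY for QP1. A condensed sketch (the full INIT attribute mask and the IB_MAD_SEND_Q_PSN start PSN come from mad.c but are truncated in the excerpt, so treat them as assumptions):

    attr->qp_state = IB_QPS_INIT;
    attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
    ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY);
    if (ret)
            goto out;

    attr->qp_state = IB_QPS_RTR;        /* no path setup needed for MADs */
    ret = ib_modify_qp(qp, attr, IB_QP_STATE);
    if (ret)
            goto out;

    attr->qp_state = IB_QPS_RTS;
    attr->sq_psn = IB_MAD_SEND_Q_PSN;   /* assumed start PSN from mad.c */
    ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
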
H A D | cma.c | 520 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) cma_init_ud_qp() argument 530 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); cma_init_ud_qp() 535 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); cma_init_ud_qp() 541 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); cma_init_ud_qp() 546 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) cma_init_conn_qp() argument 556 return ib_modify_qp(qp, &qp_attr, qp_attr_mask); cma_init_conn_qp() 563 struct ib_qp *qp; rdma_create_qp() local 570 qp = ib_create_qp(pd, qp_init_attr); rdma_create_qp() 571 if (IS_ERR(qp)) rdma_create_qp() 572 return PTR_ERR(qp); rdma_create_qp() 575 ret = cma_init_ud_qp(id_priv, qp); rdma_create_qp() 577 ret = cma_init_conn_qp(id_priv, qp); rdma_create_qp() 581 id->qp = qp; rdma_create_qp() 582 id_priv->qp_num = qp->qp_num; rdma_create_qp() 583 id_priv->srq = (qp->srq != NULL); rdma_create_qp() 586 ib_destroy_qp(qp); rdma_create_qp() 597 ib_destroy_qp(id_priv->id.qp); rdma_destroy_qp() 598 id_priv->id.qp = NULL; rdma_destroy_qp() 611 if (!id_priv->id.qp) { cma_modify_qp_rtr() 622 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); cma_modify_qp_rtr() 647 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); cma_modify_qp_rtr() 660 if (!id_priv->id.qp) { cma_modify_qp_rts() 672 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); cma_modify_qp_rts() 684 if (!id_priv->id.qp) { cma_modify_qp_err() 690 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); cma_modify_qp_err() 1143 if (id_priv->id.qp) { cma_ib_handler() 2876 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; cma_connect_iw() 2899 if (!id->qp) { rdma_connect() 2973 if (id_priv->id.qp) { cma_accept_iw() 3015 if (!id->qp && conn_param) { rdma_accept() 3155 if (!status && id_priv->id.qp) cma_ib_mc_handler() 3156 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, cma_ib_mc_handler() 3421 if (id->qp) rdma_leave_multicast() 3422 ib_detach_mcast(id->qp, rdma_leave_multicast()
|
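
rdma_create_qp() above splits initialization by QP type: datagram QPs are walked all the way to RTS at create time by cma_init_ud_qp(), while connected QPs stop at INIT in cma_init_conn_qp() and are driven through RTR/RTS later by the CM handshake (cma_modify_qp_rtr()/cma_modify_qp_rts()). Condensed from the listing (the type test is assumed to key on IB_QPT_UD):

    qp = ib_create_qp(pd, qp_init_attr);
    if (IS_ERR(qp))
            return PTR_ERR(qp);

    if (id->qp_type == IB_QPT_UD)        /* assumed type check */
            ret = cma_init_ud_qp(id_priv, qp);   /* INIT -> RTR -> RTS now */
    else
            ret = cma_init_conn_qp(id_priv, qp); /* INIT only; CM does the rest */
    if (ret) {
            ib_destroy_qp(qp);
            return ret;
    }
    id->qp = qp;
    id_priv->qp_num = qp->qp_num;
    id_priv->srq = (qp->srq != NULL);
    return 0;
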
H A D | uverbs_main.c | 192 static void ib_uverbs_detach_umcast(struct ib_qp *qp, ib_uverbs_detach_umcast() argument 198 ib_detach_mcast(qp, &mcast->gid, mcast->lid); ib_uverbs_detach_umcast() 240 struct ib_qp *qp = uobj->object; ib_uverbs_cleanup_ucontext() local 245 if (qp != qp->real_qp) { ib_uverbs_cleanup_ucontext() 246 ib_close_qp(qp); ib_uverbs_cleanup_ucontext() 248 ib_uverbs_detach_umcast(qp, uqp); ib_uverbs_cleanup_ucontext() 249 ib_destroy_qp(qp); ib_uverbs_cleanup_ucontext() 521 /* for XRC target qp's, check that qp is live */ ib_uverbs_qp_event_handler() 522 if (!event->element.qp->uobject || !event->element.qp->uobject->live) ib_uverbs_qp_event_handler() 525 uobj = container_of(event->element.qp->uobject, ib_uverbs_qp_event_handler()
|
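
ib_uverbs_cleanup_ucontext() above applies the alias rule from verbs.c when a process exits with live objects: an opened (shared) QP is merely closed, while a real QP is first detached from any multicast groups the process joined and then destroyed. Condensed:

    struct ib_qp *qp = uobj->object;

    if (qp != qp->real_qp) {
            ib_close_qp(qp);                  /* just drop the alias */
    } else {
            ib_uverbs_detach_umcast(qp, uqp); /* leave usermode mcast groups */
            ib_destroy_qp(qp);                /* tear down the real QP */
    }
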
H A D | iwcm.h | 50 struct ib_qp *qp; member in struct:iwcm_id_private
|
H A D | agent.c | 102 ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); agent_send_response()
|
H A D | mad_priv.h | 187 struct ib_qp *qp; member in struct:ib_mad_qp_info
|
H A D | mad_rmpp.c | 160 ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, alloc_response_msg() 290 rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd, create_rmpp_recv()
|
/linux-4.1.27/include/linux/ |
H A D | ntb.h | 68 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 70 void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, 75 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp); 76 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); 80 void ntb_transport_free_queue(struct ntb_transport_qp *qp); 81 int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 83 int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 85 void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len); 86 void ntb_transport_link_up(struct ntb_transport_qp *qp); 87 void ntb_transport_link_down(struct ntb_transport_qp *qp); 88 bool ntb_transport_link_query(struct ntb_transport_qp *qp);
|
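
The ntb_transport API above is consumed as in ntb_netdev.c further down: create a queue with rx/tx handlers, pre-post receive buffers, then raise the link. A hedged sketch (struct ntb_queue_handlers and the create-queue call follow ntb.h/ntb_netdev.c; my_rx/my_tx, priv, bufs, NUM_BUFS and BUF_LEN are illustrative names):

    static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
                      void *data, int len)
    {
            /* consume len bytes at data, then re-post the buffer */
            ntb_transport_rx_enqueue(qp, data, data, BUF_LEN);
    }

    static void my_tx(struct ntb_transport_qp *qp, void *qp_data,
                      void *data, int len)
    {
            /* data has been transmitted; the buffer may be reused */
    }

    static const struct ntb_queue_handlers my_handlers = {
            .rx_handler = my_rx,
            .tx_handler = my_tx,
    };

    /* setup: mirrors ntb_netdev_probe()/ntb_netdev_open() */
    qp = ntb_transport_create_queue(priv, pdev, &my_handlers);
    for (i = 0; i < NUM_BUFS; i++)
            ntb_transport_rx_enqueue(qp, bufs[i], bufs[i], BUF_LEN);
    ntb_transport_link_up(qp);
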
/linux-4.1.27/drivers/scsi/bnx2i/ |
H A D | bnx2i_hwi.c | 153 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; bnx2i_arm_cq_event_coalescing() 170 cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1; bnx2i_arm_cq_event_coalescing() 171 if (cq_index > ep->qp.cqe_size * 2) bnx2i_arm_cq_event_coalescing() 172 cq_index -= ep->qp.cqe_size * 2; bnx2i_arm_cq_event_coalescing() 195 if (!bnx2i_conn->ep->qp.rqe_left) bnx2i_get_rq_buf() 198 bnx2i_conn->ep->qp.rqe_left--; bnx2i_get_rq_buf() 199 memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len); bnx2i_get_rq_buf() 200 if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) { bnx2i_get_rq_buf() 201 bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe; bnx2i_get_rq_buf() 202 bnx2i_conn->ep->qp.rq_cons_idx = 0; bnx2i_get_rq_buf() 204 bnx2i_conn->ep->qp.rq_cons_qe++; bnx2i_get_rq_buf() 205 bnx2i_conn->ep->qp.rq_cons_idx++; bnx2i_get_rq_buf() 220 writel(cpu_to_le32(msg), conn->ep->qp.ctx_base); bnx2i_ring_577xx_doorbell() 234 u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000); bnx2i_put_rq_buf() 237 ep->qp.rqe_left += count; bnx2i_put_rq_buf() 238 ep->qp.rq_prod_idx &= 0x7FFF; bnx2i_put_rq_buf() 239 ep->qp.rq_prod_idx += count; bnx2i_put_rq_buf() 241 if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) { bnx2i_put_rq_buf() 242 ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; bnx2i_put_rq_buf() 244 ep->qp.rq_prod_idx |= 0x8000; bnx2i_put_rq_buf() 246 ep->qp.rq_prod_idx |= hi_bit; bnx2i_put_rq_buf() 249 rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt; bnx2i_put_rq_buf() 250 rq_db->prod_idx = ep->qp.rq_prod_idx; bnx2i_put_rq_buf() 253 writew(ep->qp.rq_prod_idx, bnx2i_put_rq_buf() 254 ep->qp.ctx_base + CNIC_RECV_DOORBELL); bnx2i_put_rq_buf() 277 sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; bnx2i_ring_sq_dbell() 278 sq_db->prod_idx = ep->qp.sq_prod_idx; bnx2i_ring_sq_dbell() 281 writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL); bnx2i_ring_sq_dbell() 300 if (bnx2i_conn->ep->qp.sq_prod_qe == bnx2i_ring_dbell_update_sq_params() 301 bnx2i_conn->ep->qp.sq_last_qe) bnx2i_ring_dbell_update_sq_params() 302 bnx2i_conn->ep->qp.sq_prod_qe = bnx2i_ring_dbell_update_sq_params() 303 bnx2i_conn->ep->qp.sq_first_qe; bnx2i_ring_dbell_update_sq_params() 305 bnx2i_conn->ep->qp.sq_prod_qe++; bnx2i_ring_dbell_update_sq_params() 307 if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <= bnx2i_ring_dbell_update_sq_params() 308 bnx2i_conn->ep->qp.sq_last_qe) bnx2i_ring_dbell_update_sq_params() 309 bnx2i_conn->ep->qp.sq_prod_qe += count; bnx2i_ring_dbell_update_sq_params() 311 tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe - bnx2i_ring_dbell_update_sq_params() 312 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_ring_dbell_update_sq_params() 313 bnx2i_conn->ep->qp.sq_prod_qe = bnx2i_ring_dbell_update_sq_params() 314 &bnx2i_conn->ep->qp.sq_first_qe[count - bnx2i_ring_dbell_update_sq_params() 318 bnx2i_conn->ep->qp.sq_prod_idx += count; bnx2i_ring_dbell_update_sq_params() 320 bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx); bnx2i_ring_dbell_update_sq_params() 343 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_login() 402 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_tmf() 473 text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_text() 519 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_scsicmd() 550 nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; bnx2i_send_iscsi_nopout() 613 bnx2i_conn->ep->qp.sq_prod_qe; bnx2i_send_iscsi_logout() 747 (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe; 
bnx2i_send_cmd_cleanup_req() 817 dma_addr = ep->qp.sq_pgtbl_phys; bnx2i_570x_send_conn_ofld_req() 821 dma_addr = ep->qp.cq_pgtbl_phys; bnx2i_570x_send_conn_ofld_req() 829 dma_addr = ep->qp.rq_pgtbl_phys; bnx2i_570x_send_conn_ofld_req() 833 ptbl = (u32 *) ep->qp.sq_pgtbl_virt; bnx2i_570x_send_conn_ofld_req() 838 ptbl = (u32 *) ep->qp.cq_pgtbl_virt; bnx2i_570x_send_conn_ofld_req() 878 dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE; bnx2i_5771x_send_conn_ofld_req() 882 dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE; bnx2i_5771x_send_conn_ofld_req() 890 dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE; bnx2i_5771x_send_conn_ofld_req() 894 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); bnx2i_5771x_send_conn_ofld_req() 898 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); bnx2i_5771x_send_conn_ofld_req() 907 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); bnx2i_5771x_send_conn_ofld_req() 963 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); setup_qp_page_tables() 964 num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; setup_qp_page_tables() 965 page = ep->qp.sq_phys; setup_qp_page_tables() 968 ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); setup_qp_page_tables() 970 ptbl = (u32 *) ep->qp.sq_pgtbl_virt; setup_qp_page_tables() 991 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); setup_qp_page_tables() 992 num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; setup_qp_page_tables() 993 page = ep->qp.rq_phys; setup_qp_page_tables() 996 ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); setup_qp_page_tables() 998 ptbl = (u32 *) ep->qp.rq_pgtbl_virt; setup_qp_page_tables() 1019 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); setup_qp_page_tables() 1020 num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; setup_qp_page_tables() 1021 page = ep->qp.cq_phys; setup_qp_page_tables() 1024 ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); setup_qp_page_tables() 1026 ptbl = (u32 *) ep->qp.cq_pgtbl_virt; setup_qp_page_tables() 1067 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; bnx2i_alloc_qp_resc() 1068 ep->qp.sq_mem_size = bnx2i_alloc_qp_resc() 1069 (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc() 1070 ep->qp.sq_pgtbl_size = bnx2i_alloc_qp_resc() 1071 (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); bnx2i_alloc_qp_resc() 1072 ep->qp.sq_pgtbl_size = bnx2i_alloc_qp_resc() 1073 (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc() 1075 ep->qp.sq_pgtbl_virt = bnx2i_alloc_qp_resc() 1076 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, bnx2i_alloc_qp_resc() 1077 &ep->qp.sq_pgtbl_phys, GFP_KERNEL); bnx2i_alloc_qp_resc() 1078 if (!ep->qp.sq_pgtbl_virt) { bnx2i_alloc_qp_resc() 1080 ep->qp.sq_pgtbl_size); bnx2i_alloc_qp_resc() 1085 ep->qp.sq_virt = bnx2i_alloc_qp_resc() 1086 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, bnx2i_alloc_qp_resc() 1087 &ep->qp.sq_phys, GFP_KERNEL); bnx2i_alloc_qp_resc() 1088 if (!ep->qp.sq_virt) { bnx2i_alloc_qp_resc() 1090 ep->qp.sq_mem_size); bnx2i_alloc_qp_resc() 1094 memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size); bnx2i_alloc_qp_resc() 1095 ep->qp.sq_first_qe = ep->qp.sq_virt; bnx2i_alloc_qp_resc() 1096 ep->qp.sq_prod_qe = ep->qp.sq_first_qe; bnx2i_alloc_qp_resc() 1097 ep->qp.sq_cons_qe = ep->qp.sq_first_qe; bnx2i_alloc_qp_resc() 1098 ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1]; bnx2i_alloc_qp_resc() 1099 ep->qp.sq_prod_idx = 0; bnx2i_alloc_qp_resc() 1100 
ep->qp.sq_cons_idx = 0; bnx2i_alloc_qp_resc() 1101 ep->qp.sqe_left = hba->max_sqes; bnx2i_alloc_qp_resc() 1104 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; bnx2i_alloc_qp_resc() 1105 ep->qp.cq_mem_size = bnx2i_alloc_qp_resc() 1106 (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc() 1107 ep->qp.cq_pgtbl_size = bnx2i_alloc_qp_resc() 1108 (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); bnx2i_alloc_qp_resc() 1109 ep->qp.cq_pgtbl_size = bnx2i_alloc_qp_resc() 1110 (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc() 1112 ep->qp.cq_pgtbl_virt = bnx2i_alloc_qp_resc() 1113 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, bnx2i_alloc_qp_resc() 1114 &ep->qp.cq_pgtbl_phys, GFP_KERNEL); bnx2i_alloc_qp_resc() 1115 if (!ep->qp.cq_pgtbl_virt) { bnx2i_alloc_qp_resc() 1117 ep->qp.cq_pgtbl_size); bnx2i_alloc_qp_resc() 1122 ep->qp.cq_virt = bnx2i_alloc_qp_resc() 1123 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, bnx2i_alloc_qp_resc() 1124 &ep->qp.cq_phys, GFP_KERNEL); bnx2i_alloc_qp_resc() 1125 if (!ep->qp.cq_virt) { bnx2i_alloc_qp_resc() 1127 ep->qp.cq_mem_size); bnx2i_alloc_qp_resc() 1130 memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size); bnx2i_alloc_qp_resc() 1132 ep->qp.cq_first_qe = ep->qp.cq_virt; bnx2i_alloc_qp_resc() 1133 ep->qp.cq_prod_qe = ep->qp.cq_first_qe; bnx2i_alloc_qp_resc() 1134 ep->qp.cq_cons_qe = ep->qp.cq_first_qe; bnx2i_alloc_qp_resc() 1135 ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1]; bnx2i_alloc_qp_resc() 1136 ep->qp.cq_prod_idx = 0; bnx2i_alloc_qp_resc() 1137 ep->qp.cq_cons_idx = 0; bnx2i_alloc_qp_resc() 1138 ep->qp.cqe_left = hba->max_cqes; bnx2i_alloc_qp_resc() 1139 ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN; bnx2i_alloc_qp_resc() 1140 ep->qp.cqe_size = hba->max_cqes; bnx2i_alloc_qp_resc() 1143 cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; bnx2i_alloc_qp_resc() 1147 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; bnx2i_alloc_qp_resc() 1148 ep->qp.rq_mem_size = bnx2i_alloc_qp_resc() 1149 (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc() 1150 ep->qp.rq_pgtbl_size = bnx2i_alloc_qp_resc() 1151 (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); bnx2i_alloc_qp_resc() 1152 ep->qp.rq_pgtbl_size = bnx2i_alloc_qp_resc() 1153 (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; bnx2i_alloc_qp_resc() 1155 ep->qp.rq_pgtbl_virt = bnx2i_alloc_qp_resc() 1156 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, bnx2i_alloc_qp_resc() 1157 &ep->qp.rq_pgtbl_phys, GFP_KERNEL); bnx2i_alloc_qp_resc() 1158 if (!ep->qp.rq_pgtbl_virt) { bnx2i_alloc_qp_resc() 1160 ep->qp.rq_pgtbl_size); bnx2i_alloc_qp_resc() 1165 ep->qp.rq_virt = bnx2i_alloc_qp_resc() 1166 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, bnx2i_alloc_qp_resc() 1167 &ep->qp.rq_phys, GFP_KERNEL); bnx2i_alloc_qp_resc() 1168 if (!ep->qp.rq_virt) { bnx2i_alloc_qp_resc() 1170 ep->qp.rq_mem_size); bnx2i_alloc_qp_resc() 1174 ep->qp.rq_first_qe = ep->qp.rq_virt; bnx2i_alloc_qp_resc() 1175 ep->qp.rq_prod_qe = ep->qp.rq_first_qe; bnx2i_alloc_qp_resc() 1176 ep->qp.rq_cons_qe = ep->qp.rq_first_qe; bnx2i_alloc_qp_resc() 1177 ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1]; bnx2i_alloc_qp_resc() 1178 ep->qp.rq_prod_idx = 0x8000; bnx2i_alloc_qp_resc() 1179 ep->qp.rq_cons_idx = 0; bnx2i_alloc_qp_resc() 1180 ep->qp.rqe_left = hba->max_rqes; bnx2i_alloc_qp_resc() 1202 if (ep->qp.ctx_base) { bnx2i_free_qp_resc() 1203 iounmap(ep->qp.ctx_base); 
bnx2i_free_qp_resc() 1204 ep->qp.ctx_base = NULL; bnx2i_free_qp_resc() 1207 if (ep->qp.sq_pgtbl_virt) { bnx2i_free_qp_resc() 1208 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, bnx2i_free_qp_resc() 1209 ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys); bnx2i_free_qp_resc() 1210 ep->qp.sq_pgtbl_virt = NULL; bnx2i_free_qp_resc() 1211 ep->qp.sq_pgtbl_phys = 0; bnx2i_free_qp_resc() 1213 if (ep->qp.sq_virt) { bnx2i_free_qp_resc() 1214 dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, bnx2i_free_qp_resc() 1215 ep->qp.sq_virt, ep->qp.sq_phys); bnx2i_free_qp_resc() 1216 ep->qp.sq_virt = NULL; bnx2i_free_qp_resc() 1217 ep->qp.sq_phys = 0; bnx2i_free_qp_resc() 1221 if (ep->qp.rq_pgtbl_virt) { bnx2i_free_qp_resc() 1222 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, bnx2i_free_qp_resc() 1223 ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys); bnx2i_free_qp_resc() 1224 ep->qp.rq_pgtbl_virt = NULL; bnx2i_free_qp_resc() 1225 ep->qp.rq_pgtbl_phys = 0; bnx2i_free_qp_resc() 1227 if (ep->qp.rq_virt) { bnx2i_free_qp_resc() 1228 dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, bnx2i_free_qp_resc() 1229 ep->qp.rq_virt, ep->qp.rq_phys); bnx2i_free_qp_resc() 1230 ep->qp.rq_virt = NULL; bnx2i_free_qp_resc() 1231 ep->qp.rq_phys = 0; bnx2i_free_qp_resc() 1235 if (ep->qp.cq_pgtbl_virt) { bnx2i_free_qp_resc() 1236 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, bnx2i_free_qp_resc() 1237 ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys); bnx2i_free_qp_resc() 1238 ep->qp.cq_pgtbl_virt = NULL; bnx2i_free_qp_resc() 1239 ep->qp.cq_pgtbl_phys = 0; bnx2i_free_qp_resc() 1241 if (ep->qp.cq_virt) { bnx2i_free_qp_resc() 1242 dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, bnx2i_free_qp_resc() 1243 ep->qp.cq_virt, ep->qp.cq_phys); bnx2i_free_qp_resc() 1244 ep->qp.cq_virt = NULL; bnx2i_free_qp_resc() 1245 ep->qp.cq_phys = 0; bnx2i_free_qp_resc() 1981 struct qp_info *qp; bnx2i_process_new_cqes() local 1989 qp = &bnx2i_conn->ep->qp; bnx2i_process_new_cqes() 1991 if (!qp->cq_virt) { bnx2i_process_new_cqes() 1997 nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe; bnx2i_process_new_cqes() 1998 if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) bnx2i_process_new_cqes() 2024 qp->cq_cons_qe); bnx2i_process_new_cqes() 2028 qp->cq_cons_qe); bnx2i_process_new_cqes() 2032 qp->cq_cons_qe); bnx2i_process_new_cqes() 2036 qp->cq_cons_qe); bnx2i_process_new_cqes() 2040 qp->cq_cons_qe)) bnx2i_process_new_cqes() 2045 qp->cq_cons_qe); bnx2i_process_new_cqes() 2049 qp->cq_cons_qe); bnx2i_process_new_cqes() 2054 qp->cq_cons_qe); bnx2i_process_new_cqes() 2058 qp->cq_cons_qe); bnx2i_process_new_cqes() 2083 qp->cqe_exp_seq_sn++; bnx2i_process_new_cqes() 2084 if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1)) bnx2i_process_new_cqes() 2085 qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN; bnx2i_process_new_cqes() 2087 if (qp->cq_cons_qe == qp->cq_last_qe) { bnx2i_process_new_cqes() 2088 qp->cq_cons_qe = qp->cq_first_qe; bnx2i_process_new_cqes() 2089 qp->cq_cons_idx = 0; bnx2i_process_new_cqes() 2091 qp->cq_cons_qe++; bnx2i_process_new_cqes() 2092 qp->cq_cons_idx++; bnx2i_process_new_cqes() 2488 ep->qp.ctx_base = NULL; bnx2i_process_ofld_cmpl() 2744 ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); bnx2i_map_ep_dbell_regs() 2763 ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off, bnx2i_map_ep_dbell_regs() 2765 if (!ep->qp.ctx_base) bnx2i_map_ep_dbell_regs()
|
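
bnx2i_alloc_qp_resc() above repeats the same sizing arithmetic for the SQ, CQ and RQ: round the queue memory up to a whole number of CNIC pages, then allocate one pointer-sized page-table slot per backing page and round that up too. Factored into one helper (names are illustrative; the constants are the cnic ones used in the listing):

    static u32 cnic_page_align(u32 size)
    {
            return (size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
    }

    static void bnx2i_queue_sizes(u32 nr_entries, u32 entry_size,
                                  u32 *mem_size, u32 *pgtbl_size)
    {
            *mem_size = cnic_page_align(nr_entries * entry_size);
            /* one page-table entry per backing page of queue memory */
            *pgtbl_size = cnic_page_align((*mem_size / CNIC_PAGE_SIZE) *
                                          sizeof(void *));
    }
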
H A D | bnx2i.h | 740 * @qp: QP information 760 struct qp_info qp; member in struct:bnx2i_endpoint
|
/linux-4.1.27/drivers/net/ |
H A D | ntb_netdev.c | 65 struct ntb_transport_qp *qp; member in struct:ntb_netdev 79 ntb_transport_link_query(dev->qp)); ntb_netdev_event_handler() 86 if (!ntb_transport_link_query(dev->qp)) ntb_netdev_event_handler() 96 static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, ntb_netdev_rx_handler() argument 128 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); ntb_netdev_rx_handler() 136 static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, ntb_netdev_tx_handler() argument 165 rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len); ntb_netdev_start_xmit() 191 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, ntb_netdev_open() 200 ntb_transport_link_up(dev->qp); ntb_netdev_open() 205 while ((skb = ntb_transport_rx_remove(dev->qp, &len))) ntb_netdev_open() 216 ntb_transport_link_down(dev->qp); ntb_netdev_close() 218 while ((skb = ntb_transport_rx_remove(dev->qp, &len))) ntb_netdev_close() 230 if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN) ntb_netdev_change_mtu() 239 ntb_transport_link_down(dev->qp); ntb_netdev_change_mtu() 244 for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++) ntb_netdev_change_mtu() 254 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, ntb_netdev_change_mtu() 265 ntb_transport_link_up(dev->qp); ntb_netdev_change_mtu() 270 ntb_transport_link_down(dev->qp); ntb_netdev_change_mtu() 272 while ((skb = ntb_transport_rx_remove(dev->qp, &len))) ntb_netdev_change_mtu() 352 dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers); ntb_netdev_probe() 353 if (!dev->qp) { ntb_netdev_probe() 358 ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN; ntb_netdev_probe() 369 ntb_transport_free_queue(dev->qp); ntb_netdev_probe() 395 ntb_transport_free_queue(dev->qp); ntb_netdev_remove()
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/ |
H A D | qp.c | 40 #include <linux/mlx4/qp.h> 52 struct mlx4_qp *qp; mlx4_qp_event() local 56 qp = __mlx4_qp_lookup(dev, qpn); mlx4_qp_event() 57 if (qp) mlx4_qp_event() 58 atomic_inc(&qp->refcount); mlx4_qp_event() 62 if (!qp) { mlx4_qp_event() 67 qp->event(qp, event_type); mlx4_qp_event() 69 if (atomic_dec_and_test(&qp->refcount)) mlx4_qp_event() 70 complete(&qp->free); mlx4_qp_event() 74 static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0) is_master_qp0() argument 79 *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; is_master_qp0() 81 *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn && is_master_qp0() 82 qp->qpn <= dev->phys_caps.base_sqpn + 1; is_master_qp0() 91 int sqd_event, struct mlx4_qp *qp, int native) __mlx4_qp_modify() 145 ret = mlx4_cmd(dev, 0, qp->qpn, 2, __mlx4_qp_modify() 149 is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) { __mlx4_qp_modify() 150 port = (qp->qpn & 1) + 1; __mlx4_qp_modify() 174 cpu_to_be32(qp->qpn); __mlx4_qp_modify() 177 qp->qpn | (!!sqd_event << 31), __mlx4_qp_modify() 181 if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) { __mlx4_qp_modify() 182 port = (qp->qpn & 1) + 1; __mlx4_qp_modify() 206 int sqd_event, struct mlx4_qp *qp) mlx4_qp_modify() 209 optpar, sqd_event, qp, 0); mlx4_qp_modify() 290 mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n", mlx4_qp_release_range() 376 mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn); mlx4_qp_free_icm() 381 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) mlx4_qp_alloc() argument 390 qp->qpn = qpn; mlx4_qp_alloc() 397 err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & mlx4_qp_alloc() 398 (dev->caps.num_qps - 1), qp); mlx4_qp_alloc() 403 atomic_set(&qp->refcount, 1); mlx4_qp_alloc() 404 init_completion(&qp->free); mlx4_qp_alloc() 467 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) mlx4_qp_remove() argument 473 radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); mlx4_qp_remove() 478 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) mlx4_qp_free() argument 480 if (atomic_dec_and_test(&qp->refcount)) mlx4_qp_free() 481 complete(&qp->free); mlx4_qp_free() 482 wait_for_completion(&qp->free); mlx4_qp_free() 484 mlx4_qp_free_icm(dev, qp->qpn); mlx4_qp_free() 564 * (in which qp number bits 6 and/or 7 are set); the other set of subareas mlx4_create_zones() 566 * Currently, the values returned by the FW (A0 steering area starting qp number mlx4_create_zones() 806 /* In mfunc, calculate proxy and tunnel qp offsets for the PF here, mlx4_init_qp_table() 857 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, mlx4_qp_query() argument 867 err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, mlx4_qp_query() 880 struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) mlx4_qp_to_ready() 897 context, 0, 0, qp); mlx4_qp_to_ready() 87 __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp, int native) __mlx4_qp_modify() argument 202 mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state, struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar, int sqd_event, struct mlx4_qp *qp) mlx4_qp_modify() argument 878 mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_qp_context *context, struct mlx4_qp *qp, enum 
mlx4_qp_state *qp_state) mlx4_qp_to_ready() argument
|
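
mlx4_qp_alloc(), mlx4_qp_event() and mlx4_qp_free() above implement a small lifetime protocol: the QP is born with refcount 1 and an initialized completion, the EQ path takes a transient reference while it invokes qp->event(), and the freeing path drops the initial reference then blocks on the completion until every transient holder is gone before releasing the ICM. Condensed from the listing:

    /* transient holder (event path), after calling qp->event(): */
    if (atomic_dec_and_test(&qp->refcount))
            complete(&qp->free);                 /* last ref wakes the freer */

    /* owner (mlx4_qp_free): */
    if (atomic_dec_and_test(&qp->refcount))      /* drop the initial ref */
            complete(&qp->free);
    wait_for_completion(&qp->free);              /* wait out event handlers */
    mlx4_qp_free_icm(dev, qp->qpn);              /* now safe to release ICM */
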
H A D | Makefile | 4 main.o mcg.o mr.o pd.o port.o profile.o qp.o reset.o sense.o \
|
H A D | mcg.c | 174 /* If the given qpn is also a promisc qp, new_steering_entry() 219 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); new_steering_entry() 274 return 0; /* qp is already duplicated */ existing_steering_entry() 277 /* add the qp as a duplicate on this index */ existing_steering_entry() 302 /* if qp is not promisc, it cannot be duplicated */ check_duplicate_entry() 306 /* The qp is promisc qp so it is a duplicate on this index check_duplicate_entry() 356 u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; promisc_steering_entry() 358 /* the qp is not promisc, the entry can't be removed */ promisc_steering_entry() 478 if ((be32_to_cpu(mgm->qp[i]) & add_promisc_qp() 502 mgm->qp[members_count++] = add_promisc_qp() 526 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); add_promisc_qp() 592 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK); remove_promisc_qp() 637 if ((be32_to_cpu(mgm->qp[i]) & remove_promisc_qp() 653 mgm->qp[loc] = mgm->qp[members_count - 1]; remove_promisc_qp() 654 mgm->qp[members_count - 1] = 0; remove_promisc_qp() 909 "port = %d prio = 0x%x qp = 0x%x ", mlx4_err_rule() 1097 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], mlx4_qp_attach_common() argument 1152 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { mlx4_qp_attach_common() 1153 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn); mlx4_qp_attach_common() 1159 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | mlx4_qp_attach_common() 1162 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); mlx4_qp_attach_common() 1187 new_steering_entry(dev, port, steer, index, qp->qpn); mlx4_qp_attach_common() 1190 index, qp->qpn); mlx4_qp_attach_common() 1206 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], mlx4_qp_detach_common() argument 1241 check_duplicate_entry(dev, port, steer, index, qp->qpn) && mlx4_qp_detach_common() 1242 !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL)) mlx4_qp_detach_common() 1247 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) { mlx4_qp_detach_common() 1253 mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn); mlx4_qp_detach_common() 1259 mgm->qp[loc] = mgm->qp[members_count - 1]; mlx4_qp_detach_common() 1260 mgm->qp[members_count - 1] = 0; mlx4_qp_detach_common() 1265 index, qp->qpn); mlx4_qp_detach_common() 1327 static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, mlx4_QP_ATTACH() argument 1343 qpn = qp->qpn; mlx4_QP_ATTACH() 1359 int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, mlx4_trans_to_dmfs_attach() argument 1376 rule.qpn = qp->qpn; mlx4_trans_to_dmfs_attach() 1399 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], mlx4_multicast_attach() argument 1413 return mlx4_QP_ATTACH(dev, qp, gid, 1, mlx4_multicast_attach() 1415 return mlx4_qp_attach_common(dev, qp, gid, mlx4_multicast_attach() 1420 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, mlx4_multicast_attach() 1429 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], mlx4_multicast_detach() argument 1442 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); mlx4_multicast_detach() 1444 return mlx4_qp_detach_common(dev, qp, gid, prot, mlx4_multicast_detach() 1515 struct mlx4_qp *qp, u8 gid[16], mlx4_unicast_attach() 1522 return mlx4_QP_ATTACH(dev, qp, gid, 1, mlx4_unicast_attach() 1525 return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, 
mlx4_unicast_attach() 1530 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, mlx4_unicast_detach() argument 1537 return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); mlx4_unicast_detach() 1539 return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER); mlx4_unicast_detach() 1514 mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot) mlx4_unicast_attach() argument
|
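
Both mlx4_qp_detach_common() and remove_promisc_qp() above delete an MGM member with the same O(1) idiom: copy the last member into the vacated slot and zero the tail, keeping the qp[] array dense so membership scans can stop at members_count. As a standalone helper:

    /* Dense-array removal used on mgm->qp[]; the caller subsequently
     * writes members_count - 1 back into the MGM entry header. */
    static void mgm_remove_qp(__be32 *qp_arr, int loc, int members_count)
    {
            qp_arr[loc] = qp_arr[members_count - 1];
            qp_arr[members_count - 1] = 0;
    }
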
H A D | en_resources.c | 36 #include <linux/mlx4/qp.h> 78 en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn); mlx4_en_fill_qp_context() 115 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) mlx4_en_sqp_event() argument
|
H A D | resource_tracker.c | 43 #include <linux/mlx4/qp.h> 115 /* saved qp params before VST enforcement in order to restore on VGT */ 441 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps - mlx4_init_quotas() 451 dev->quotas.qp = mlx4_init_quotas() 1102 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n", remove_qp_ok() 1267 enum res_qp_states state, struct res_qp **qp, qp_res_start_move_to() 1321 if (qp) qp_res_start_move_to() 1322 *qp = r; qp_res_start_move_to() 2702 /* adjust qkey in qp context */ adjust_proxy_tun_qkey() 2715 struct res_qp *qp; mlx4_RST2INIT_QP_wrapper() local 2728 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0); mlx4_RST2INIT_QP_wrapper() 2731 qp->local_qpn = local_qpn; mlx4_RST2INIT_QP_wrapper() 2732 qp->sched_queue = 0; mlx4_RST2INIT_QP_wrapper() 2733 qp->param3 = 0; mlx4_RST2INIT_QP_wrapper() 2734 qp->vlan_control = 0; mlx4_RST2INIT_QP_wrapper() 2735 qp->fvl_rx = 0; mlx4_RST2INIT_QP_wrapper() 2736 qp->pri_path_fl = 0; mlx4_RST2INIT_QP_wrapper() 2737 qp->vlan_index = 0; mlx4_RST2INIT_QP_wrapper() 2738 qp->feup = 0; mlx4_RST2INIT_QP_wrapper() 2739 qp->qpc_flags = be32_to_cpu(qpc->flags); mlx4_RST2INIT_QP_wrapper() 2772 qp->mtt = mtt; mlx4_RST2INIT_QP_wrapper() 2774 qp->rcq = rcq; mlx4_RST2INIT_QP_wrapper() 2776 qp->scq = scq; mlx4_RST2INIT_QP_wrapper() 2784 qp->srq = srq; mlx4_RST2INIT_QP_wrapper() 3486 struct res_qp *qp; mlx4_GEN_QP_wrapper() local 3488 err = get_res(dev, slave, qpn, RES_QP, &qp); mlx4_GEN_QP_wrapper() 3491 if (qp->com.from_state != RES_QP_HW) { mlx4_GEN_QP_wrapper() 3575 struct res_qp *qp; mlx4_INIT2RTR_QP_wrapper() local 3602 err = get_res(dev, slave, qpn, RES_QP, &qp); mlx4_INIT2RTR_QP_wrapper() 3605 if (qp->com.from_state != RES_QP_HW) { mlx4_INIT2RTR_QP_wrapper() 3617 qp->sched_queue = orig_sched_queue; mlx4_INIT2RTR_QP_wrapper() 3618 qp->param3 = orig_param3; mlx4_INIT2RTR_QP_wrapper() 3619 qp->vlan_control = orig_vlan_control; mlx4_INIT2RTR_QP_wrapper() 3620 qp->fvl_rx = orig_fvl_rx; mlx4_INIT2RTR_QP_wrapper() 3621 qp->pri_path_fl = orig_pri_path_fl; mlx4_INIT2RTR_QP_wrapper() 3622 qp->vlan_index = orig_vlan_index; mlx4_INIT2RTR_QP_wrapper() 3623 qp->feup = orig_feup; mlx4_INIT2RTR_QP_wrapper() 3740 struct res_qp *qp; mlx4_2RST_QP_wrapper() local 3742 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0); mlx4_2RST_QP_wrapper() 3749 atomic_dec(&qp->mtt->ref_count); mlx4_2RST_QP_wrapper() 3750 atomic_dec(&qp->rcq->ref_count); mlx4_2RST_QP_wrapper() 3751 atomic_dec(&qp->scq->ref_count); mlx4_2RST_QP_wrapper() 3752 if (qp->srq) mlx4_2RST_QP_wrapper() 3753 atomic_dec(&qp->srq->ref_count); mlx4_2RST_QP_wrapper() 3825 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp, qp_attach() argument 3834 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port, qp_attach() 3845 return mlx4_qp_attach_common(dev, qp, gid, qp_attach() 3852 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, qp_detach() argument 3860 return mlx4_qp_detach_common(dev, qp, gid, prot, type); qp_detach() 3891 struct mlx4_qp qp; /* dummy for calling attach/detach */ mlx4_QP_ATTACH_wrapper() local 3908 qp.qpn = qpn; mlx4_QP_ATTACH_wrapper() 3910 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot, mlx4_QP_ATTACH_wrapper() 3913 pr_err("Fail to attach rule to qp 0x%x\n", qpn); mlx4_QP_ATTACH_wrapper() 3928 err = qp_detach(dev, &qp, gid, prot, type, reg_id); mlx4_QP_ATTACH_wrapper() 3930 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n", mlx4_QP_ATTACH_wrapper() 3937 qp_detach(dev, &qp, 
gid, prot, type, reg_id); mlx4_QP_ATTACH_wrapper() 4233 struct mlx4_qp qp; /* dummy for calling attach/detach */ detach_qp() local 4241 qp.qpn = rqp->local_qpn; detach_qp() 4242 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid, detach_qp() 4313 struct res_qp *qp; rem_slave_qps() local 4326 list_for_each_entry_safe(qp, tmp, qp_list, com.list) { list_for_each_entry_safe() 4328 if (qp->com.owner == slave) { list_for_each_entry_safe() 4329 qpn = qp->com.res_id; list_for_each_entry_safe() 4330 detach_qp(dev, slave, qp); list_for_each_entry_safe() 4331 state = qp->com.from_state; list_for_each_entry_safe() 4336 rb_erase(&qp->com.node, list_for_each_entry_safe() 4338 list_del(&qp->com.list); list_for_each_entry_safe() 4345 kfree(qp); list_for_each_entry_safe() 4356 qp->local_qpn, 2, list_for_each_entry_safe() 4362 slave, qp->local_qpn); list_for_each_entry_safe() 4363 atomic_dec(&qp->rcq->ref_count); list_for_each_entry_safe() 4364 atomic_dec(&qp->scq->ref_count); list_for_each_entry_safe() 4365 atomic_dec(&qp->mtt->ref_count); list_for_each_entry_safe() 4366 if (qp->srq) list_for_each_entry_safe() 4367 atomic_dec(&qp->srq->ref_count); list_for_each_entry_safe() 4828 struct res_qp *qp; mlx4_vf_immed_vlan_work_handler() local 4851 mlx4_warn(dev, "Trying to update-qp in slave %d\n", mlx4_vf_immed_vlan_work_handler() 4878 list_for_each_entry_safe(qp, tmp, qp_list, com.list) { list_for_each_entry_safe() 4880 if (qp->com.owner == work->slave) { list_for_each_entry_safe() 4881 if (qp->com.from_state != RES_QP_HW || list_for_each_entry_safe() 4882 !qp->sched_queue || /* no INIT2RTR trans yet */ list_for_each_entry_safe() 4883 mlx4_is_qp_reserved(dev, qp->local_qpn) || list_for_each_entry_safe() 4884 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) { list_for_each_entry_safe() 4888 port = (qp->sched_queue >> 6 & 1) + 1; list_for_each_entry_safe() 4893 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff)) list_for_each_entry_safe() 4899 upd_context->qp_context.param3 = qp->param3; list_for_each_entry_safe() 4900 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control; list_for_each_entry_safe() 4901 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx; list_for_each_entry_safe() 4902 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index; list_for_each_entry_safe() 4903 upd_context->qp_context.pri_path.fl = qp->pri_path_fl; list_for_each_entry_safe() 4904 upd_context->qp_context.pri_path.feup = qp->feup; list_for_each_entry_safe() 4906 qp->sched_queue; list_for_each_entry_safe() 4908 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN); list_for_each_entry_safe() 4912 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN; list_for_each_entry_safe() 4914 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; list_for_each_entry_safe() 4916 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; list_for_each_entry_safe() 4918 qp->sched_queue & 0xC7; list_for_each_entry_safe() 4929 qp->local_qpn & 0xffffff, list_for_each_entry_safe() 4934 work->slave, port, qp->local_qpn, err); list_for_each_entry_safe() 1266 qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, enum res_qp_states state, struct res_qp **qp, int alloc) qp_res_start_move_to() argument
|
H A D | en_rx.c | 37 #include <linux/mlx4/qp.h> 1111 struct mlx4_qp *qp) mlx4_en_config_rss_qp() 1121 err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); mlx4_en_config_rss_qp() 1123 en_err(priv, "Failed to allocate qp #%x\n", qpn); mlx4_en_config_rss_qp() 1126 qp->event = mlx4_en_sqp_event; mlx4_en_config_rss_qp() 1143 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state); mlx4_en_config_rss_qp() 1145 mlx4_qp_remove(mdev->dev, qp); mlx4_en_config_rss_qp() 1146 mlx4_qp_free(mdev->dev, qp); mlx4_en_config_rss_qp() 1167 en_err(priv, "Failed allocating drop qp\n"); mlx4_en_create_drop_qp() 1185 /* Allocate rx qp's and configure them according to rss map */ mlx4_en_config_rss_steer() 1220 /* Configure RSS indirection qp */ mlx4_en_config_rss_steer() 1108 mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, struct mlx4_en_rx_ring *ring, enum mlx4_qp_state *state, struct mlx4_qp *qp) mlx4_en_config_rss_qp() argument
|
H A D | en_tx.c | 37 #include <linux/mlx4/qp.h> 119 en_err(priv, "failed reserving qp for TX ring\n"); mlx4_en_create_tx_ring() 123 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); mlx4_en_create_tx_ring() 125 en_err(priv, "Failed allocating qp %d\n", ring->qpn); mlx4_en_create_tx_ring() 128 ring->qp.event = mlx4_en_sqp_event; mlx4_en_create_tx_ring() 182 mlx4_qp_remove(mdev->dev, &ring->qp); mlx4_en_destroy_tx_ring() 183 mlx4_qp_free(mdev->dev, &ring->qp); mlx4_en_destroy_tx_ring() 210 ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); mlx4_en_activate_tx_ring() 219 &ring->qp, &ring->qp_state); mlx4_en_activate_tx_ring() 233 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); mlx4_en_deactivate_tx_ring()
|
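
mlx4_en_activate_tx_ring() above precomputes the ring's doorbell word: the QPN occupies the upper bytes of the 32-bit doorbell, so the shift and byte swap happen once at activation rather than per packet, and the transmit hot path writes the cached value straight to the UAR. Sketch (the MLX4_SEND_DOORBELL write is how en_tx.c rings the doorbell, but it is not visible in the excerpt, so treat it as an assumption):

    /* activation time: precompute once */
    ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8);

    /* transmit hot path: descriptors are posted, then ring the bell */
    wmb();                                       /* order WQE writes first */
    iowrite32(ring->doorbell_qpn,
              ring->bf.uar->map + MLX4_SEND_DOORBELL);
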
H A D | en_netdev.c | 502 struct mlx4_qp qp; mlx4_en_uc_steer_add() local 505 qp.qpn = *qpn; mlx4_en_uc_steer_add() 509 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); mlx4_en_uc_steer_add() 553 struct mlx4_qp qp; mlx4_en_uc_steer_release() local 556 qp.qpn = qpn; mlx4_en_uc_steer_release() 560 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); mlx4_en_uc_steer_release() 600 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); mlx4_en_get_qp() 602 en_err(priv, "Failed to reserve qp for mac registration\n"); mlx4_en_get_qp() 681 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", 894 /* Add the default qp number as multicast mlx4_en_set_promisc_mode() 988 /* Add the default qp number as multicast promisc */ mlx4_en_do_multicast() 1591 /* Set qp number */ mlx4_en_start_port() 1592 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); mlx4_en_start_port() 1595 en_err(priv, "Failed getting eth qp\n"); mlx4_en_start_port() 1660 /* Set default qp number */ mlx4_en_start_port() 1663 en_err(priv, "Failed setting default qp numbers\n"); mlx4_en_start_port()
|
H A D | mlx4_en.h | 51 #include <linux/mlx4/qp.h> 277 struct mlx4_qp qp; member in struct:mlx4_en_tx_ring 795 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
|
H A D | en_cq.c | 35 #include <linux/mlx4/qp.h>
|
H A D | mlx4.h | 607 __be32 qp[MLX4_MAX_QP_PER_MGM]; member in struct:mlx4_mgm 1275 int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1277 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1280 int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
|
H A D | fw.c | 2457 struct mlx4_qp qp; mlx4_opreq_action() local 2504 qp.qpn = be32_to_cpu(mgm->qp[i]); mlx4_opreq_action() 2506 err = mlx4_multicast_detach(dev, &qp, mlx4_opreq_action() 2510 err = mlx4_multicast_attach(dev, &qp, mlx4_opreq_action()
|
H A D | eq.c | 497 be32_to_cpu(eqe->event.qp.qpn) mlx4_eq_int() 512 mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & mlx4_eq_int()
|
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/ |
H A D | ehea_qmr.c | 377 static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, ehea_qp_alloc_register() argument 398 qp->fw_handle, rpage, 1); ehea_qp_alloc_register() 422 struct ehea_qp *qp; ehea_create_qp() local 427 qp = kzalloc(sizeof(*qp), GFP_KERNEL); ehea_create_qp() 428 if (!qp) ehea_create_qp() 431 qp->adapter = adapter; ehea_create_qp() 434 &qp->fw_handle, &qp->epas); ehea_create_qp() 445 ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages, ehea_create_qp() 454 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1, ehea_create_qp() 465 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2, ehea_create_qp() 477 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3, ehea_create_qp() 488 qp->init_attr = *init_attr; ehea_create_qp() 490 return qp; ehea_create_qp() 493 hw_queue_dtor(&qp->hw_rqueue2); ehea_create_qp() 496 hw_queue_dtor(&qp->hw_rqueue1); ehea_create_qp() 499 hw_queue_dtor(&qp->hw_squeue); ehea_create_qp() 502 ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle); ehea_create_qp() 503 ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE); ehea_create_qp() 506 kfree(qp); ehea_create_qp() 510 static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force) ehea_destroy_qp_res() argument 513 struct ehea_qp_init_attr *qp_attr = &qp->init_attr; ehea_destroy_qp_res() 516 ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle); ehea_destroy_qp_res() 517 hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force); ehea_destroy_qp_res() 521 hw_queue_dtor(&qp->hw_squeue); ehea_destroy_qp_res() 522 hw_queue_dtor(&qp->hw_rqueue1); ehea_destroy_qp_res() 525 hw_queue_dtor(&qp->hw_rqueue2); ehea_destroy_qp_res() 527 hw_queue_dtor(&qp->hw_rqueue3); ehea_destroy_qp_res() 528 kfree(qp); ehea_destroy_qp_res() 533 int ehea_destroy_qp(struct ehea_qp *qp) ehea_destroy_qp() argument 536 if (!qp) ehea_destroy_qp() 539 hcp_epas_dtor(&qp->epas); ehea_destroy_qp() 541 hret = ehea_destroy_qp_res(qp, NORMAL_FREE); ehea_destroy_qp() 543 ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr); ehea_destroy_qp() 544 hret = ehea_destroy_qp_res(qp, FORCE_FREE); ehea_destroy_qp()
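ehea_destroy_qp() above shows a two-stage teardown: try a NORMAL_FREE first, and if the hypervisor refuses, collect error data and retry with FORCE_FREE. A sketch of that idiom, assuming hvcall-style status codes (H_SUCCESS is an assumption here, not shown in the listing):

static int example_destroy_qp(struct ehea_qp *qp)
{
	u64 hret, aer = 0, aerr = 0;

	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);
	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret != H_SUCCESS) {
		/* dump adapter error data, then force the free */
		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}
	return hret == H_SUCCESS ? 0 : -EIO;
}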
|
H A D | ehea_hw.h | 218 static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes) ehea_update_sqa() argument 220 struct h_epa epa = qp->epas.kernel; ehea_update_sqa() 225 static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes) ehea_update_rq3a() argument 227 struct h_epa epa = qp->epas.kernel; ehea_update_rq3a() 232 static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes) ehea_update_rq2a() argument 234 struct h_epa epa = qp->epas.kernel; ehea_update_rq2a() 239 static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes) ehea_update_rq1a() argument 241 struct h_epa epa = qp->epas.kernel; ehea_update_rq1a()
|
H A D | ehea_qmr.h | 306 static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp, ehea_get_next_rwqe() argument 312 queue = &qp->hw_rqueue1; ehea_get_next_rwqe() 314 queue = &qp->hw_rqueue2; ehea_get_next_rwqe() 316 queue = &qp->hw_rqueue3; ehea_get_next_rwqe() 339 static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index) ehea_poll_rq1() argument 341 struct hw_queue *queue = &qp->hw_rqueue1; ehea_poll_rq1() 352 static inline void ehea_inc_rq1(struct ehea_qp *qp) ehea_inc_rq1() argument 354 hw_qeit_inc(&qp->hw_rqueue1); ehea_inc_rq1() 386 int ehea_destroy_qp(struct ehea_qp *qp);
|
H A D | ehea_main.c | 207 arr[i++].fwh = pr->qp->fw_handle; ehea_update_firmware_handles() 430 ehea_update_rq1a(pr->qp, adder); ehea_refill_rq1() 450 ehea_update_rq1a(pr->qp, i - 1); ehea_init_fill_rq1() 458 struct ehea_qp *qp = pr->qp; ehea_refill_rq_def() local 500 rwqe = ehea_get_next_rwqe(qp, rq_nr); ehea_refill_rq_def() 520 ehea_update_rq2a(pr->qp, adder); ehea_refill_rq_def() 522 ehea_update_rq3a(pr->qp, adder); ehea_refill_rq_def() 654 pr->qp->init_attr.qp_nr); ehea_treat_poll_error() 669 struct ehea_qp *qp = pr->qp; ehea_proc_rwqes() local 685 cqe = ehea_poll_rq1(qp, &wqe_index); ehea_proc_rwqes() 687 ehea_inc_rq1(qp); ehea_proc_rwqes() 752 cqe = ehea_poll_rq1(qp, &wqe_index); ehea_proc_rwqes() 788 swqe = ehea_get_swqe(pr->qp, &swqe_index); check_sqs() 798 ehea_post_swqe(pr->qp, swqe); check_sqs() 909 cqe = ehea_poll_rq1(pr->qp, &wqe_index); ehea_poll() 949 struct ehea_qp *qp; ehea_qp_aff_irq_handler() local 961 qp = port->port_res[qp_token].qp; ehea_qp_aff_irq_handler() 963 resource_type = ehea_error_data(port->adapter, qp->fw_handle, ehea_qp_aff_irq_handler() 1278 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; ehea_fill_port_res() 1392 port->port_res[i].qp->init_attr.qp_nr; ehea_configure_port() 1395 port->port_res[0].qp->init_attr.qp_nr; ehea_configure_port() 1540 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); ehea_init_port_res() 1541 if (!pr->qp) { ehea_init_port_res() 1585 ehea_destroy_qp(pr->qp); ehea_init_port_res() 1597 if (pr->qp) ehea_clean_portres() 1600 ret = ehea_destroy_qp(pr->qp); ehea_clean_portres() 1829 cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0; ehea_promiscuous() 2063 swqe = ehea_get_swqe(pr->qp, &swqe_index); ehea_start_xmit() 2106 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); ehea_start_xmit() 2115 ehea_post_swqe(pr->qp, swqe); ehea_start_xmit() 2201 static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) ehea_activate_qp() argument 2215 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp() 2223 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp() 2231 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp() 2239 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp() 2247 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp() 2255 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp() 2263 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_activate_qp() 2384 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); ehea_up() 2511 struct ehea_qp qp = *orig_qp; ehea_purge_sq() local 2512 struct ehea_qp_init_attr *init_attr = &qp.init_attr; ehea_purge_sq() 2518 swqe = ehea_get_swqe(&qp, &wqe_index); ehea_purge_sq() 2563 struct ehea_qp *qp = pr->qp; ehea_stop_qps() local 2566 ehea_purge_sq(qp); ehea_stop_qps() 2569 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_stop_qps() 2580 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_stop_qps() 2589 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_stop_qps() 2614 struct ehea_qp qp = *orig_qp; ehea_update_rqs() local 2615 struct ehea_qp_init_attr *init_attr = &qp.init_attr; ehea_update_rqs() 2627 rwqe = ehea_get_next_rwqe(&qp, 2); ehea_update_rqs() 2636 rwqe = ehea_get_next_rwqe(&qp, 3); ehea_update_rqs() 2665 struct ehea_qp *qp = pr->qp; ehea_restart_qps() local 2673 ehea_update_rqs(qp, pr); ehea_restart_qps() 2676 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_restart_qps() 2687 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_restart_qps() 2696 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, ehea_restart_qps()
|
H A D | ehea.h | 363 struct ehea_qp *qp; member in struct:ehea_port_res
|
/linux-4.1.27/drivers/infiniband/ulp/ipoib/ |
H A D | ipoib_verbs.c | 60 ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY); ipoib_mcast_attach() 68 ret = ib_attach_mcast(priv->qp, mgid, mlid); ipoib_mcast_attach() 96 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); ipoib_init_qp() 105 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); ipoib_init_qp() 115 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask); ipoib_init_qp() 125 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_init_qp() 213 priv->qp = ib_create_qp(priv->pd, &init_attr); ipoib_transport_dev_init() 214 if (IS_ERR(priv->qp)) { ipoib_transport_dev_init() 219 priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff; ipoib_transport_dev_init() 220 priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; ipoib_transport_dev_init() 221 priv->dev->dev_addr[3] = (priv->qp->qp_num ) & 0xff; ipoib_transport_dev_init() 266 if (priv->qp) { ipoib_transport_dev_cleanup() 267 if (ib_destroy_qp(priv->qp)) ipoib_transport_dev_cleanup() 270 priv->qp = NULL; ipoib_transport_dev_cleanup()
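The three ib_modify_qp() calls in ipoib_init_qp() above walk a UD QP through the INIT, RTR and RTS states. A minimal, self-contained sketch of that sequence against the verbs API; the qkey, port and pkey index values are placeholders, not ipoib's:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int example_init_ud_qp(struct ib_qp *qp, u32 qkey)
{
	struct ib_qp_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_INIT;
	attr.qkey = qkey;
	attr.port_num = 1;	/* assumption: single-port HCA */
	attr.pkey_index = 0;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_QKEY |
			   IB_QP_PORT | IB_QP_PKEY_INDEX);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTR;	/* UD needs no extra RTR attributes */
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}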
|
H A D | ipoib_cm.c | 128 ret = ib_post_recv(rx->qp, wr, &bad_wr); ipoib_cm_post_receive_nonsrq() 226 if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr)) ipoib_cm_start_rx_drain() 273 struct ib_cm_id *cm_id, struct ib_qp *qp, ipoib_cm_modify_rx_qp() 286 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); ipoib_cm_modify_rx_qp() 298 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); ipoib_cm_modify_rx_qp() 318 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); ipoib_cm_modify_rx_qp() 420 struct ib_qp *qp, struct ib_cm_req_event_param *req, ipoib_cm_send_rep() 427 data.qpn = cpu_to_be32(priv->qp->qp_num); ipoib_cm_send_rep() 435 rep.qp_num = qp->qp_num; ipoib_cm_send_rep() 459 p->qp = ipoib_cm_create_rx_qp(dev, p); ipoib_cm_req_handler() 460 if (IS_ERR(p->qp)) { ipoib_cm_req_handler() 461 ret = PTR_ERR(p->qp); ipoib_cm_req_handler() 466 ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn); ipoib_cm_req_handler() 486 ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn); ipoib_cm_req_handler() 489 if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) ipoib_cm_req_handler() 490 ipoib_warn(priv, "unable to move qp to error state\n"); ipoib_cm_req_handler() 495 ib_destroy_qp(p->qp); ipoib_cm_req_handler() 517 if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) ipoib_cm_rx_handler() 518 ipoib_warn(priv, "unable to move qp to error state\n"); ipoib_cm_rx_handler() 587 p = wc->qp->qp_context; ipoib_cm_handle_rx_wc() 707 return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); post_send() 727 tx->tx_head, skb->len, tx->qp->qp_num); ipoib_cm_send() 763 tx->qp->qp_num); ipoib_cm_send() 778 struct ipoib_cm_tx *tx = wc->qp->qp_context; ipoib_cm_handle_tx_wc() 856 ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), ipoib_cm_dev_open() 860 IPOIB_CM_IETF_ID | priv->qp->qp_num); ipoib_cm_dev_open() 885 ib_destroy_qp(rx->qp); ipoib_cm_free_rx_reap_list() 915 ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); ipoib_cm_dev_stop() 917 ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); ipoib_cm_dev_stop() 980 ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask); ipoib_cm_rep_handler() 992 ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask); ipoib_cm_rep_handler() 1050 struct ib_cm_id *id, struct ib_qp *qp, ipoib_cm_send_req() 1058 data.qpn = cpu_to_be32(priv->qp->qp_num); ipoib_cm_send_req() 1064 req.qp_num = qp->qp_num; ipoib_cm_send_req() 1065 req.qp_type = qp->qp_type; ipoib_cm_send_req() 1087 struct ib_cm_id *cm_id, struct ib_qp *qp) ipoib_cm_modify_tx_init() 1103 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); ipoib_cm_modify_tx_init() 1126 p->qp = ipoib_cm_create_tx_qp(p->dev, p); ipoib_cm_tx_init() 1127 if (IS_ERR(p->qp)) { ipoib_cm_tx_init() 1128 ret = PTR_ERR(p->qp); ipoib_cm_tx_init() 1129 ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret); ipoib_cm_tx_init() 1140 ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp); ipoib_cm_tx_init() 1142 ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret); ipoib_cm_tx_init() 1146 ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec); ipoib_cm_tx_init() 1153 p->qp->qp_num, pathrec->dgid.raw, qpn); ipoib_cm_tx_init() 1162 ib_destroy_qp(p->qp); ipoib_cm_tx_init() 1164 p->qp = NULL; ipoib_cm_tx_init() 1177 p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail); ipoib_cm_tx_destroy() 1212 if (p->qp) ipoib_cm_tx_destroy() 1213 ib_destroy_qp(p->qp); ipoib_cm_tx_destroy() 1446 ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); ipoib_cm_stale_task() 1448 ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); ipoib_cm_stale_task() 272 ipoib_cm_modify_rx_qp(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp, unsigned psn) ipoib_cm_modify_rx_qp() argument 419 ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp, struct ib_cm_req_event_param *req, unsigned psn) ipoib_cm_send_rep() argument 1049 ipoib_cm_send_req(struct net_device *dev, struct ib_cm_id *id, struct ib_qp *qp, u32 qpn, struct ib_sa_path_rec *pathrec) ipoib_cm_send_req() argument 1086 ipoib_cm_modify_tx_init(struct net_device *dev, struct ib_cm_id *cm_id, struct ib_qp *qp) ipoib_cm_modify_tx_init() argument
|
H A D | ipoib_ib.c | 113 ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr); ipoib_ib_post_receive() 210 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) ipoib_ib_handle_rx_wc() 345 ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr); ipoib_qp_state_validate_work() 352 __func__, priv->qp->qp_num, qp_attr.qp_state); ipoib_qp_state_validate_work() 358 ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE); ipoib_qp_state_validate_work() 361 ret, priv->qp->qp_num); ipoib_qp_state_validate_work() 365 __func__, priv->qp->qp_num); ipoib_qp_state_validate_work() 368 priv->qp->qp_num, qp_attr.qp_state); ipoib_qp_state_validate_work() 413 ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n", ipoib_ib_handle_tx_wc() 414 __func__, priv->qp->qp_num); ipoib_ib_handle_tx_wc() 547 return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr); post_send() 853 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_ib_dev_stop() 902 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_ib_dev_stop() 918 priv->qp = NULL; ipoib_ib_dev_init()
|
H A D | ipoib.h | 226 struct ib_qp *qp; member in struct:ipoib_cm_rx 237 struct ib_qp *qp; member in struct:ipoib_cm_tx 348 struct ib_qp *qp; member in struct:ipoib_dev_priv
|
/linux-4.1.27/arch/ia64/kernel/ |
H A D | unwind_decoder.c | 54 * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg) 58 * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg) 60 * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg) 62 * UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg) 122 unsigned char byte1, byte2, abreg, qp; unw_decode_x3() local 129 qp = (byte1 & 0x3f); unw_decode_x3() 133 UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg); unw_decode_x3() 135 UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg); unw_decode_x3() 142 unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg; unw_decode_x4() local 148 qp = (byte1 & 0x3f); unw_decode_x4() 154 UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg); unw_decode_x4() 156 UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg); unw_decode_x4()
|
H A D | kprobes.c | 91 /* brl.cond.sptk.many.clr rel<<4 (qp=0) */ set_brl_inst() 149 * (qp) cmpx.crel.ctype p1,p2=r2,r3 186 * Returns qp value if supported 194 int qp; unsupported_inst() local 196 qp = kprobe_inst & 0x3f; unsupported_inst() 198 if (slot == 1 && qp) { unsupported_inst() 205 qp = 0; unsupported_inst() 237 if (slot == 1 && qp) { unsupported_inst() 243 qp = 0; unsupported_inst() 272 if (slot == 1 && qp) { unsupported_inst() 279 qp = 0; unsupported_inst() 284 if (slot == 1 && qp) { unsupported_inst() 290 qp = 0; unsupported_inst() 293 return qp; unsupported_inst() 304 int qp) prepare_break_inst() 310 * Copy the original kprobe_inst qualifying predicate(qp) prepare_break_inst() 313 break_inst |= qp; prepare_break_inst() 589 int qp; arch_prepare_kprobe() local 604 qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr); arch_prepare_kprobe() 605 if (qp < 0) arch_prepare_kprobe() 614 prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp); arch_prepare_kprobe() 300 prepare_break_inst(uint template, uint slot, uint major_opcode, unsigned long kprobe_inst, struct kprobe *p, int qp) prepare_break_inst() argument
|
H A D | brl_emu.c | 58 unsigned long opcode, btype, qp, offset, cpl; ia64_emulate_brl() local 83 qp = ((bundle[1] >> 23) & 0x3f); ia64_emulate_brl() 89 tmp_taken = regs->pr & (1L << qp); ia64_emulate_brl()
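In brl_emu.c above, qp is the 6-bit qualifying predicate of the branch-long instruction: the emulation extracts it from the bundle and tests the corresponding bit of the predicate register file. A small sketch of just that test:

static int example_predicate_taken(unsigned long pr, unsigned long bundle1)
{
	unsigned long qp = (bundle1 >> 23) & 0x3f;	/* 6-bit predicate number */

	return (pr & (1UL << qp)) != 0;	/* branch executes only if PR[qp] is set */
}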
|
H A D | paravirt_patch.c | 31 unsigned long long qp : 6; member in struct:ia64_inst::__anon1713 355 unsigned long qp: 6; member in struct:inst_x3_op::__anon1714 414 unsigned long qp: 6; member in struct:inst_b1::__anon1716
|
H A D | unwind.c | 1073 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr) desc_is_active() 1077 if (qp > 0) { desc_is_active() 1078 if ((sr->pr_val & (1UL << qp)) == 0) desc_is_active() 1080 sr->pr_mask |= (1UL << qp); desc_is_active() 1086 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr) desc_restore_p() 1090 if (!desc_is_active(qp, t, sr)) desc_restore_p() 1100 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, desc_spill_reg_p() 1106 if (!desc_is_active(qp, t, sr)) desc_spill_reg_p() 1121 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, desc_spill_psprel_p() 1126 if (!desc_is_active(qp, t, sr)) desc_spill_psprel_p() 1136 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff, desc_spill_sprel_p() 1141 if (!desc_is_active(qp, t, sr)) desc_spill_sprel_p() 1071 desc_is_active(unsigned char qp, unw_word t, struct unw_state_record *sr) desc_is_active() argument 1084 desc_restore_p(unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr) desc_restore_p() argument 1098 desc_spill_reg_p(unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, unsigned char ytreg, struct unw_state_record *sr) desc_spill_reg_p() argument 1119 desc_spill_psprel_p(unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, struct unw_state_record *sr) desc_spill_psprel_p() argument 1134 desc_spill_sprel_p(unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff, struct unw_state_record *sr) desc_spill_sprel_p() argument
|
H A D | unaligned.c | 166 unsigned long qp:6; /* [0:5] */ member in struct:__anon1735 1384 DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d " ia64_handle_unaligned() 1385 "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm, ia64_handle_unaligned()
|
/linux-4.1.27/drivers/misc/vmw_vmci/ |
H A D | vmci_queue_pair.c | 221 struct qp_entry qp; member in struct:qp_broker_entry 239 struct qp_entry qp; member in struct:qp_guest_endpoint 914 struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle); qp_guest_handle_to_entry() local 916 entry = qp ? container_of( qp_guest_handle_to_entry() 917 qp, struct qp_guest_endpoint, qp) : NULL; qp_guest_handle_to_entry() 928 struct qp_entry *qp = qp_list_find(&qp_broker_list, handle); qp_broker_handle_to_entry() local 930 entry = qp ? container_of( qp_broker_handle_to_entry() 931 qp, struct qp_broker_entry, qp) : NULL; qp_broker_handle_to_entry() 986 entry->qp.peer = peer; qp_guest_endpoint_create() 987 entry->qp.flags = flags; qp_guest_endpoint_create() 988 entry->qp.produce_size = produce_size; qp_guest_endpoint_create() 989 entry->qp.consume_size = consume_size; qp_guest_endpoint_create() 990 entry->qp.ref_count = 0; qp_guest_endpoint_create() 994 INIT_LIST_HEAD(&entry->qp.list_item); qp_guest_endpoint_create() 1000 entry->qp.handle = vmci_resource_handle(&entry->resource); qp_guest_endpoint_create() 1002 qp_list_find(&qp_guest_endpoints, entry->qp.handle)) { qp_guest_endpoint_create() 1019 qp_free_queue(entry->produce_q, entry->qp.produce_size); qp_guest_endpoint_destroy() 1020 qp_free_queue(entry->consume_q, entry->qp.consume_size); qp_guest_endpoint_destroy() 1050 alloc_msg->handle = entry->qp.handle; qp_alloc_hypercall() 1051 alloc_msg->peer = entry->qp.peer; qp_alloc_hypercall() 1052 alloc_msg->flags = entry->qp.flags; qp_alloc_hypercall() 1053 alloc_msg->produce_size = entry->qp.produce_size; qp_alloc_hypercall() 1054 alloc_msg->consume_size = entry->qp.consume_size; qp_alloc_hypercall() 1121 if (entry->qp.flags & VMCI_QPFLAG_LOCAL) { qp_detatch_guest_work() 1124 if (entry->qp.ref_count > 1) { qp_detatch_guest_work() 1156 entry->qp.ref_count--; qp_detatch_guest_work() 1157 if (entry->qp.ref_count == 0) qp_detatch_guest_work() 1158 qp_list_remove_entry(&qp_guest_endpoints, &entry->qp); qp_detatch_guest_work() 1162 ref_count = entry->qp.ref_count; qp_detatch_guest_work() 1202 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { qp_alloc_guest_work() 1204 if (queue_pair_entry->qp.ref_count > 1) { qp_alloc_guest_work() 1210 if (queue_pair_entry->qp.produce_size != consume_size || qp_alloc_guest_work() 1211 queue_pair_entry->qp.consume_size != qp_alloc_guest_work() 1213 queue_pair_entry->qp.flags != qp_alloc_guest_work() 1273 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) { qp_alloc_guest_work() 1286 if (queue_pair_entry->qp.handle.context != context_id || qp_alloc_guest_work() 1287 (queue_pair_entry->qp.peer != VMCI_INVALID_ID && qp_alloc_guest_work() 1288 queue_pair_entry->qp.peer != context_id)) { qp_alloc_guest_work() 1293 if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) { qp_alloc_guest_work() 1308 qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp); qp_alloc_guest_work() 1311 queue_pair_entry->qp.ref_count++; qp_alloc_guest_work() 1312 *handle = queue_pair_entry->qp.handle; qp_alloc_guest_work() 1321 if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) && qp_alloc_guest_work() 1322 queue_pair_entry->qp.ref_count == 1) { qp_alloc_guest_work() 1425 entry->qp.handle = handle; qp_broker_create() 1426 entry->qp.peer = peer; qp_broker_create() 1427 entry->qp.flags = flags; qp_broker_create() 1428 entry->qp.produce_size = guest_produce_size; qp_broker_create() 1429 entry->qp.consume_size = guest_consume_size; qp_broker_create() 1430 entry->qp.ref_count = 1; qp_broker_create() 1454 INIT_LIST_HEAD(&entry->qp.list_item); qp_broker_create() 1459 entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp), qp_broker_create() 1468 (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1); qp_broker_create() 1493 qp_list_add_entry(&qp_broker_list, &entry->qp); qp_broker_create() 1507 entry->qp.handle = vmci_resource_handle(&entry->resource); qp_broker_create() 1510 entry->qp.handle); qp_broker_create() 1512 entry->qp.handle); qp_broker_create() 1515 vmci_ctx_qp_create(context, entry->qp.handle); qp_broker_create() 1615 if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) || qp_broker_attach() 1648 if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id) qp_broker_attach() 1677 if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER)) qp_broker_attach() 1687 if (entry->qp.produce_size != produce_size || qp_broker_attach() 1688 entry->qp.consume_size != consume_size) { qp_broker_attach() 1691 } else if (entry->qp.produce_size != consume_size || qp_broker_attach() 1692 entry->qp.consume_size != produce_size) { qp_broker_attach() 1747 qp_notify_peer(true, entry->qp.handle, context_id, qp_broker_attach() 1751 entry->create_id, entry->qp.handle.context, qp_broker_attach() 1752 entry->qp.handle.resource); qp_broker_attach() 1756 entry->qp.ref_count++; qp_broker_attach() 1767 vmci_ctx_qp_create(context, entry->qp.handle); qp_broker_attach() 2122 entry->create_id, entry->qp.handle.context, vmci_qp_broker_set_page_store() 2123 entry->qp.handle.resource); vmci_qp_broker_set_page_store() 2204 entry->qp.ref_count--; vmci_qp_broker_detach() 2206 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_detach() 2258 if (entry->qp.ref_count == 0) { vmci_qp_broker_detach() 2259 qp_list_remove_entry(&qp_broker_list, &entry->qp); vmci_qp_broker_detach() 2265 qp_host_free_queue(entry->produce_q, entry->qp.produce_size); vmci_qp_broker_detach() 2266 qp_host_free_queue(entry->consume_q, entry->qp.consume_size); vmci_qp_broker_detach() 2333 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_map() 2340 page_store.len = QPE_NUM_PAGES(entry->qp); vmci_qp_broker_map() 2444 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL; vmci_qp_broker_unmap()
|
/linux-4.1.27/drivers/scsi/lpfc/ |
H A D | lpfc_debugfs.c | 2002 struct lpfc_queue *qp = NULL; lpfc_idiag_queinfo_read() local 2024 qp = phba->sli4_hba.hba_eq[x]; lpfc_idiag_queinfo_read() 2025 if (!qp) lpfc_idiag_queinfo_read() 2033 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read() 2034 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read() 2041 qp->queue_id, lpfc_idiag_queinfo_read() 2042 qp->entry_count, lpfc_idiag_queinfo_read() 2043 qp->entry_size, lpfc_idiag_queinfo_read() 2044 qp->host_index, lpfc_idiag_queinfo_read() 2045 qp->hba_index); lpfc_idiag_queinfo_read() 2049 qp->EQ_max_eqe = 0; lpfc_idiag_queinfo_read() 2057 qp = phba->sli4_hba.fcp_cq[x]; lpfc_idiag_queinfo_read() 2066 qp->assoc_qid, lpfc_idiag_queinfo_read() 2067 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read() 2068 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read() 2074 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read() 2075 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read() 2076 qp->hba_index); lpfc_idiag_queinfo_read() 2080 qp->CQ_max_cqe = 0; lpfc_idiag_queinfo_read() 2088 qp = phba->sli4_hba.fcp_wq[x]; lpfc_idiag_queinfo_read() 2097 qp->assoc_qid, lpfc_idiag_queinfo_read() 2098 qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read() 2104 qp->queue_id, lpfc_idiag_queinfo_read() 2105 qp->entry_count, lpfc_idiag_queinfo_read() 2106 qp->entry_size, lpfc_idiag_queinfo_read() 2107 qp->host_index, lpfc_idiag_queinfo_read() 2108 qp->hba_index); lpfc_idiag_queinfo_read() 2121 qp = phba->sli4_hba.mbx_cq; lpfc_idiag_queinfo_read() 2122 if (qp) { lpfc_idiag_queinfo_read() 2131 qp->assoc_qid, lpfc_idiag_queinfo_read() 2132 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read() 2133 qp->q_cnt_3, lpfc_idiag_queinfo_read() 2134 (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read() 2140 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read() 2141 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read() 2142 qp->hba_index); lpfc_idiag_queinfo_read() 2151 qp = phba->sli4_hba.mbx_wq; lpfc_idiag_queinfo_read() 2152 if (qp) { lpfc_idiag_queinfo_read() 2165 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read() 2166 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read() 2167 qp->hba_index); lpfc_idiag_queinfo_read() 2176 qp = phba->sli4_hba.els_cq; lpfc_idiag_queinfo_read() 2177 if (qp) { lpfc_idiag_queinfo_read() 2186 qp->assoc_qid, lpfc_idiag_queinfo_read() 2187 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read() 2188 qp->q_cnt_3, lpfc_idiag_queinfo_read() 2189 (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read() 2195 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read() 2196 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read() 2197 qp->hba_index); lpfc_idiag_queinfo_read() 2200 qp->CQ_max_cqe = 0; lpfc_idiag_queinfo_read() 2209 qp = phba->sli4_hba.els_wq; lpfc_idiag_queinfo_read() 2210 if (qp) { lpfc_idiag_queinfo_read() 2219 qp->assoc_qid, lpfc_idiag_queinfo_read() 2220 qp->q_cnt_1, lpfc_idiag_queinfo_read() 2221 (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read() 2227 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read() 2228 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read() 2229 qp->hba_index); lpfc_idiag_queinfo_read() 2239 qp = phba->sli4_hba.hdr_rq; lpfc_idiag_queinfo_read() 2249 qp->assoc_qid, lpfc_idiag_queinfo_read() 2250 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read() 2251 qp->q_cnt_3, lpfc_idiag_queinfo_read() 2252 (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read() 2258 qp->queue_id, lpfc_idiag_queinfo_read() 2259 qp->entry_count, lpfc_idiag_queinfo_read() 2260 qp->entry_size, lpfc_idiag_queinfo_read() 2261 qp->host_index, lpfc_idiag_queinfo_read() 2262 qp->hba_index); lpfc_idiag_queinfo_read() 2265 qp = phba->sli4_hba.dat_rq; lpfc_idiag_queinfo_read() 2271 qp->queue_id, lpfc_idiag_queinfo_read() 2272 qp->entry_count, lpfc_idiag_queinfo_read() 2273 qp->entry_size, lpfc_idiag_queinfo_read() 2274 qp->host_index, lpfc_idiag_queinfo_read() 2275 qp->hba_index); lpfc_idiag_queinfo_read() 2285 qp = phba->sli4_hba.fof_eq; lpfc_idiag_queinfo_read() 2286 if (!qp) lpfc_idiag_queinfo_read() 2294 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read() 2295 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read() 2302 qp->queue_id, lpfc_idiag_queinfo_read() 2303 qp->entry_count, lpfc_idiag_queinfo_read() 2304 qp->entry_size, lpfc_idiag_queinfo_read() 2305 qp->host_index, lpfc_idiag_queinfo_read() 2306 qp->hba_index); lpfc_idiag_queinfo_read() 2309 qp->EQ_max_eqe = 0; lpfc_idiag_queinfo_read() 2320 qp = phba->sli4_hba.oas_cq; lpfc_idiag_queinfo_read() 2321 if (qp) { lpfc_idiag_queinfo_read() 2330 qp->assoc_qid, lpfc_idiag_queinfo_read() 2331 qp->q_cnt_1, qp->q_cnt_2, lpfc_idiag_queinfo_read() 2332 qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read() 2338 qp->queue_id, qp->entry_count, lpfc_idiag_queinfo_read() 2339 qp->entry_size, qp->host_index, lpfc_idiag_queinfo_read() 2340 qp->hba_index); lpfc_idiag_queinfo_read() 2343 qp->CQ_max_cqe = 0; lpfc_idiag_queinfo_read() 2352 qp = phba->sli4_hba.oas_wq; lpfc_idiag_queinfo_read() 2353 if (qp) { lpfc_idiag_queinfo_read() 2361 qp->assoc_qid, lpfc_idiag_queinfo_read() 2362 qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); lpfc_idiag_queinfo_read() 2368 qp->queue_id, lpfc_idiag_queinfo_read() 2369 qp->entry_count, lpfc_idiag_queinfo_read() 2370 qp->entry_size, lpfc_idiag_queinfo_read() 2371 qp->host_index, lpfc_idiag_queinfo_read() 2372 qp->hba_index); lpfc_idiag_queinfo_read()
|
/linux-4.1.27/lib/mpi/ |
H A D | mpih-div.c | 58 mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs, mpihelp_divrem() argument 87 qp += qextra_limbs; mpihelp_divrem() 89 udiv_qrnnd(qp[i], n1, n1, np[i], d); mpihelp_divrem() 90 qp -= qextra_limbs; mpihelp_divrem() 93 udiv_qrnnd(qp[i], n1, n1, 0, d); mpihelp_divrem() 135 qp[i] = q; mpihelp_divrem() 156 qp[i] = q; mpihelp_divrem() 229 qp[i] = q; mpihelp_divrem()
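In mpihelp_divrem() above, qp receives the quotient limbs while the running remainder is carried in n1 through the udiv_qrnnd() primitive. A hedged restatement of the single-limb case using 32-bit limbs and 64-bit intermediates instead of the macro; limbs are stored least-significant first, so the loop walks down from the most significant limb exactly as in the listing:

#include <linux/types.h>

static u32 example_divrem_1(u32 *qp, const u32 *np, int nsize, u32 d)
{
	u64 acc = 0;
	int i;

	for (i = nsize - 1; i >= 0; i--) {
		acc = (acc << 32) | np[i];	/* bring down the next limb */
		qp[i] = (u32)(acc / d);		/* quotient limb */
		acc %= d;			/* remainder < d, fits 32 bits */
	}
	return (u32)acc;			/* final remainder */
}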
|
H A D | mpi-internal.h | 224 mpi_limb_t mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
|
/linux-4.1.27/drivers/scsi/sym53c8xx_2/ |
H A D | sym_misc.h | 159 #define FOR_EACH_QUEUED_ELEMENT(head, qp) \ 160 for (qp = (head)->flink; qp != (head); qp = qp->flink)
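FOR_EACH_QUEUED_ELEMENT() above is the driver's queue-walk primitive; callers recover the enclosing CCB from the embedded SYM_QUEHEAD with sym_que_entry(), a container_of-style cast, as the busy_ccbq walks in sym_hipd.c below show. A sketch of the idiom; the tag field used for matching is an assumption for illustration:

static struct sym_ccb *example_find_ccb(SYM_QUEHEAD *head, u_short wanted_tag)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;

	FOR_EACH_QUEUED_ELEMENT(head, qp) {
		cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp->tag == wanted_tag)	/* assumption: a tag member */
			return cp;
	}
	return NULL;
}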
|
H A D | sym_hipd.c | 1542 SYM_QUEHEAD *qp; sym_start_next_ccbs() local 1556 qp = sym_remque_head(&lp->waiting_ccbq); sym_start_next_ccbs() 1557 if (!qp) sym_start_next_ccbs() 1559 cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq); sym_start_next_ccbs() 1563 sym_insque_head(qp, &lp->waiting_ccbq); sym_start_next_ccbs() 1572 sym_insque_head(qp, &lp->waiting_ccbq); sym_start_next_ccbs() 1581 sym_insque_tail(qp, &lp->started_ccbq); sym_start_next_ccbs() 1643 SYM_QUEHEAD *qp; sym_flush_comp_queue() local 1646 while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) { sym_flush_comp_queue() 1648 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_flush_comp_queue() 1947 SYM_QUEHEAD *qp; sym_settrans() local 2035 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_settrans() 2037 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_settrans() 3200 SYM_QUEHEAD qtmp, *qp; sym_clear_tasks() local 3216 while ((qp = sym_remque_head(&qtmp)) != NULL) { sym_clear_tasks() 3218 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_clear_tasks() 3283 SYM_QUEHEAD *qp; sym_sir_task_recovery() local 3324 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_sir_task_recovery() 3325 cp = sym_que_entry(qp,struct sym_ccb,link_ccbq); sym_sir_task_recovery() 3356 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_sir_task_recovery() 3357 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_sir_task_recovery() 3472 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_sir_task_recovery() 3473 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_sir_task_recovery() 4657 SYM_QUEHEAD *qp; sym_get_ccb() local 4665 qp = sym_remque_head(&np->free_ccbq); sym_get_ccb() 4666 if (!qp) sym_get_ccb() 4668 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_get_ccb() 5333 SYM_QUEHEAD *qp; sym_abort_scsiio() local 5339 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_abort_scsiio() 5340 struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_abort_scsiio() 5805 SYM_QUEHEAD *qp; sym_hcb_free() local 5822 while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) { sym_hcb_free() 5823 cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_hcb_free()
|
H A D | sym_glue.c | 598 SYM_QUEHEAD *qp; sym_eh_handler() local 637 FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { sym_eh_handler() 638 struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); sym_eh_handler()
|
/linux-4.1.27/drivers/crypto/ |
H A D | n2_core.c | 233 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) 236 qp->head != qp->tail) 477 static unsigned long wait_for_tail(struct spu_queue *qp) wait_for_tail() argument 482 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); wait_for_tail() 487 if (head == qp->tail) { wait_for_tail() 488 qp->head = head; wait_for_tail() 495 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, submit_and_wait_for_tail() argument 498 unsigned long hv_ret = spu_queue_submit(qp, ent); submit_and_wait_for_tail() 501 hv_ret = wait_for_tail(qp); submit_and_wait_for_tail() 514 struct spu_queue *qp; n2_do_async_digest() local 539 qp = cpu_to_cwq[cpu]; n2_do_async_digest() 540 if (!qp) n2_do_async_digest() 543 spin_lock_irqsave(&qp->lock, flags); n2_do_async_digest() 548 ent = qp->q + qp->tail; n2_do_async_digest() 565 ent = spu_queue_next(qp, ent); n2_do_async_digest() 580 if (submit_and_wait_for_tail(qp, ent) != HV_EOK) n2_do_async_digest() 585 spin_unlock_irqrestore(&qp->lock, flags); n2_do_async_digest() 827 struct spu_queue *qp, bool encrypt) __n2_crypt_chunk() 834 ent = spu_queue_alloc(qp, cp->arr_len); __n2_crypt_chunk() 857 ent = spu_queue_next(qp, ent); __n2_crypt_chunk() 870 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0; __n2_crypt_chunk() 978 struct spu_queue *qp; n2_do_ecb() local 983 qp = cpu_to_cwq[get_cpu()]; n2_do_ecb() 985 if (!qp) n2_do_ecb() 988 spin_lock_irqsave(&qp->lock, flags); n2_do_ecb() 991 err = __n2_crypt_chunk(tfm, c, qp, encrypt); n2_do_ecb() 999 hv_ret = wait_for_tail(qp); n2_do_ecb() 1004 spin_unlock_irqrestore(&qp->lock, flags); n2_do_ecb() 1030 struct spu_queue *qp; n2_do_chaining() local 1038 qp = cpu_to_cwq[get_cpu()]; n2_do_chaining() 1040 if (!qp) n2_do_chaining() 1043 spin_lock_irqsave(&qp->lock, flags); n2_do_chaining() 1050 err = __n2_crypt_chunk(tfm, c, qp, true); n2_do_chaining() 1080 err = __n2_crypt_chunk(tfm, c, qp, false); n2_do_chaining() 1089 hv_ret = wait_for_tail(qp); n2_do_chaining() 1094 spin_unlock_irqrestore(&qp->lock, flags); n2_do_chaining() 826 __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp, struct spu_queue *qp, bool encrypt) __n2_crypt_chunk() argument
|
/linux-4.1.27/drivers/net/ethernet/sun/ |
H A D | sunhme.c | 2139 struct quattro *qp = (struct quattro *) cookie; quattro_sbus_interrupt() local 2143 struct net_device *dev = qp->happy_meals[i]; quattro_sbus_interrupt() 2558 struct quattro *qp; quattro_sbus_find() local 2561 qp = platform_get_drvdata(op); quattro_sbus_find() 2562 if (qp) quattro_sbus_find() 2563 return qp; quattro_sbus_find() 2565 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); quattro_sbus_find() 2566 if (qp != NULL) { quattro_sbus_find() 2570 qp->happy_meals[i] = NULL; quattro_sbus_find() 2572 qp->quattro_dev = child; quattro_sbus_find() 2573 qp->next = qfe_sbus_list; quattro_sbus_find() 2574 qfe_sbus_list = qp; quattro_sbus_find() 2576 platform_set_drvdata(op, qp); quattro_sbus_find() 2578 return qp; quattro_sbus_find() 2587 struct quattro *qp; quattro_sbus_register_irqs() local 2589 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) { quattro_sbus_register_irqs() 2590 struct platform_device *op = qp->quattro_dev; quattro_sbus_register_irqs() 2594 if (!qp->happy_meals[qfe_slot]) quattro_sbus_register_irqs() 2603 qp); quattro_sbus_register_irqs() 2616 struct quattro *qp; quattro_sbus_free_irqs() local 2618 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) { quattro_sbus_free_irqs() 2619 struct platform_device *op = qp->quattro_dev; quattro_sbus_free_irqs() 2623 if (!qp->happy_meals[qfe_slot]) quattro_sbus_free_irqs() 2629 free_irq(op->archdata.irqs[0], qp); quattro_sbus_free_irqs() 2638 struct quattro *qp; quattro_pci_find() local 2641 for (qp = qfe_pci_list; qp != NULL; qp = qp->next) { quattro_pci_find() 2642 struct pci_dev *qpdev = qp->quattro_dev; quattro_pci_find() 2645 return qp; quattro_pci_find() 2647 qp = kmalloc(sizeof(struct quattro), GFP_KERNEL); quattro_pci_find() 2648 if (qp != NULL) { quattro_pci_find() 2652 qp->happy_meals[i] = NULL; quattro_pci_find() 2654 qp->quattro_dev = bdev; quattro_pci_find() 2655 qp->next = qfe_pci_list; quattro_pci_find() 2656 qfe_pci_list = qp; quattro_pci_find() 2659 qp->nranges = 0; quattro_pci_find() 2661 return qp; quattro_pci_find() 2681 struct quattro *qp = NULL; happy_meal_sbus_probe_one() local 2694 qp = quattro_sbus_find(op); happy_meal_sbus_probe_one() 2695 if (qp == NULL) happy_meal_sbus_probe_one() 2698 if (qp->happy_meals[qfe_slot] == NULL) happy_meal_sbus_probe_one() 2744 if (qp != NULL) { happy_meal_sbus_probe_one() 2745 hp->qfe_parent = qp; happy_meal_sbus_probe_one() 2747 qp->happy_meals[qfe_slot] = dev; happy_meal_sbus_probe_one() 2795 if (qp != NULL) happy_meal_sbus_probe_one() 2884 if (qp) happy_meal_sbus_probe_one() 2885 qp->happy_meals[qfe_slot] = NULL; happy_meal_sbus_probe_one() 2983 struct quattro *qp = NULL; happy_meal_pci_probe() local 3013 qp = quattro_pci_find(pdev); happy_meal_pci_probe() 3014 if (qp == NULL) happy_meal_pci_probe() 3017 if (qp->happy_meals[qfe_slot] == NULL) happy_meal_pci_probe() 3039 if (qp != NULL) { happy_meal_pci_probe() 3040 hp->qfe_parent = qp; happy_meal_pci_probe() 3042 qp->happy_meals[qfe_slot] = dev; happy_meal_pci_probe() 3110 if (qp != NULL) happy_meal_pci_probe() 3169 struct pci_dev *qpdev = qp->quattro_dev; happy_meal_pci_probe() 3203 if (qp != NULL) happy_meal_pci_probe() 3204 qp->happy_meals[qfe_slot] = NULL; happy_meal_pci_probe()
|
H A D | sunqe.h | 298 #define TX_BUFFS_AVAIL(qp) \ 299 (((qp)->tx_old <= (qp)->tx_new) ? \ 300 (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \ 301 (qp)->tx_old - (qp)->tx_new - 1)
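TX_BUFFS_AVAIL() above is the classic free-slot count for a circular descriptor ring: one slot is always sacrificed so that tx_old == tx_new unambiguously means empty. The same computation written out as a function, with the ring size passed in rather than taken from TX_RING_SIZE:

static int example_tx_bufs_avail(int tx_old, int tx_new, int ring_size)
{
	if (tx_old <= tx_new)			/* no wrap between old and new */
		return tx_old + (ring_size - 1) - tx_new;
	return tx_old - tx_new - 1;		/* producer has wrapped */
}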
|
H A D | sunqe.c | 937 struct sunqe *qp = platform_get_drvdata(op); qec_sbus_remove() local 938 struct net_device *net_dev = qp->dev; qec_sbus_remove() 942 of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE); qec_sbus_remove() 943 of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE); qec_sbus_remove() 945 qp->qe_block, qp->qblock_dvma); qec_sbus_remove() 947 qp->buffers, qp->buffers_dvma); qec_sbus_remove()
|
/linux-4.1.27/drivers/infiniband/hw/cxgb3/ |
H A D | iwch_cm.c | 681 ep->com.qp = NULL; close_complete_upcall() 713 ep->com.qp = NULL; peer_abort_upcall() 742 ep->com.qp = NULL; connect_reply_upcall() 925 err = iwch_modify_qp(ep->com.qp->rhp, process_mpa_reply() 926 ep->com.qp, mask, &attrs, 1); process_mpa_reply() 930 if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { process_mpa_reply() 1484 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close() 1497 if (ep->com.cm_id && ep->com.qp) { peer_close() 1499 iwch_modify_qp(ep->com.qp->rhp, ep->com.qp, peer_close() 1592 if (ep->com.cm_id && ep->com.qp) { peer_abort() 1594 ret = iwch_modify_qp(ep->com.qp->rhp, peer_abort() 1595 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, peer_abort() 1599 "%s - qp <- error failed!\n", peer_abort() 1659 if ((ep->com.cm_id) && (ep->com.qp)) { close_con_rpl() 1661 iwch_modify_qp(ep->com.qp->rhp, close_con_rpl() 1662 ep->com.qp, close_con_rpl() 1689 * For (1), we save the message in the qp for later consumer consumption. 1705 skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, terminate() 1707 ep->com.qp->attr.terminate_msg_len = skb->len; terminate() 1708 ep->com.qp->attr.is_terminate_local = 0; terminate() 1726 iwch_modify_qp(ep->com.qp->rhp, ec_status() 1727 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, ec_status() 1754 if (ep->com.cm_id && ep->com.qp) { ep_timeout() 1756 iwch_modify_qp(ep->com.qp->rhp, ep_timeout() 1757 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, ep_timeout() 1801 struct iwch_qp *qp = get_qhp(h, conn_param->qpn); iwch_accept_cr() local 1810 BUG_ON(!qp); iwch_accept_cr() 1812 if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) || iwch_accept_cr() 1813 (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) { iwch_accept_cr() 1821 ep->com.qp = qp; iwch_accept_cr() 1845 err = iwch_modify_qp(ep->com.qp->rhp, iwch_accept_cr() 1846 ep->com.qp, mask, &attrs, 1); iwch_accept_cr() 1851 if (iwch_rqes_posted(qp)) { iwch_accept_cr() 1870 ep->com.qp = NULL; iwch_accept_cr() 1929 ep->com.qp = get_qhp(h, conn_param->qpn); iwch_connect() 1930 BUG_ON(!ep->com.qp); iwch_connect() 1931 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, iwch_connect() 1932 ep->com.qp, cm_id); iwch_connect()
|
H A D | iwch_provider.h | 182 void iwch_qp_add_ref(struct ib_qp *qp); 183 void iwch_qp_rem_ref(struct ib_qp *qp); 331 int iwch_bind_mw(struct ib_qp *qp,
|
H A D | iwch_ev.c | 65 "qp state %d qpid 0x%x status 0x%x\n", __func__, post_qp_event() 93 event.element.qp = &qhp->ibqp; post_qp_event()
|
H A D | iwch_cm.h | 156 struct iwch_qp *qp; member in struct:iwch_ep_common
|
H A D | iwch_provider.c | 1085 void iwch_qp_add_ref(struct ib_qp *qp) iwch_qp_add_ref() argument 1087 PDBG("%s ib_qp %p\n", __func__, qp); iwch_qp_add_ref() 1088 atomic_inc(&(to_iwch_qp(qp)->refcnt)); iwch_qp_add_ref() 1091 void iwch_qp_rem_ref(struct ib_qp *qp) iwch_qp_rem_ref() argument 1093 PDBG("%s ib_qp %p\n", __func__, qp); iwch_qp_rem_ref() 1094 if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt))) iwch_qp_rem_ref() 1095 wake_up(&(to_iwch_qp(qp)->wait)); iwch_qp_rem_ref()
|
H A D | iwch_qp.c | 528 int iwch_bind_mw(struct ib_qp *qp, iwch_bind_mw() argument 546 qhp = to_iwch_qp(qp); iwch_bind_mw() 768 return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb); iwch_post_zb_read() 818 /* locking hierarchy: cq lock first, then qp lock. */ __flush_qp() 832 /* locking hierarchy: cq lock first, then qp lock. */ __flush_qp()
|
H A D | iwch_cq.c | 82 wc->qp = &qhp->ibqp; iwch_poll_cq_one()
|
/linux-4.1.27/include/linux/mlx5/ |
H A D | qp.h | 595 struct mlx5_core_qp *qp, 601 struct mlx5_core_qp *qp); 603 struct mlx5_core_qp *qp); 604 int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, 611 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); 612 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
|
H A D | driver.h | 487 /* start: qp staff */ 493 /* end: qp staff */
|
/linux-4.1.27/drivers/infiniband/hw/usnic/ |
H A D | usnic_ib_verbs.h | 30 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, 44 int usnic_ib_destroy_qp(struct ib_qp *qp);
|
H A D | usnic_ib_qp_grp.h | 31 * The qp group struct represents all the hw resources needed to present a ib_qp
|
H A D | usnic_ib_qp_grp.c | 86 * The QP res chunk, used to derive qp indices, get_qp_res_chunk() 106 usnic_err("Unable to get qp res with err %ld\n", enable_qp_grp() 116 usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n", enable_qp_grp() 147 usnic_err("Unable to get qp res with err %ld\n", disable_qp_grp() 478 ib_event.element.qp = &qp_grp->ibqp; usnic_ib_qp_grp_modify()
|
H A D | usnic_ib_verbs.c | 185 usnic_info("No free qp grp found on %s\n", find_free_vf_and_create_qp_grp() 351 int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, usnic_ib_query_qp() argument 364 qp_grp = to_uqp_grp(qp); usnic_ib_query_qp() 524 int usnic_ib_destroy_qp(struct ib_qp *qp) usnic_ib_destroy_qp() argument 531 qp_grp = to_uqp_grp(qp); usnic_ib_destroy_qp() 535 usnic_err("Failed to move qp grp %u to reset\n", usnic_ib_destroy_qp()
|
H A D | usnic_ib_main.c | 120 usnic_err("Failed to transistion qp grp %u from %s to %s\n", usnic_ib_qp_grp_modify_active_to_err()
|
/linux-4.1.27/drivers/scsi/bnx2fc/ |
H A D | bnx2fc_debug.c | 3 * session resources such as connection id and qp resources.
|
H A D | bnx2fc_debug.h | 3 * session resources such as connection id and qp resources.
|
H A D | bnx2fc_tgt.c | 3 * session resources such as connection id and qp resources. 666 * bnx2fc_alloc_session_resc - Allocate qp resources for the session 836 * bnx2i_free_session_resc - free qp resources for the session
|
H A D | bnx2fc_constants.h | 3 * session resources such as connection id and qp resources.
|
H A D | 57xx_hsi_bnx2fc.h | 3 * session resources such as connection id and qp resources.
|
/linux-4.1.27/drivers/infiniband/ulp/iser/ |
H A D | iser_verbs.c | 59 iser_err("got qp event %d\n",cause->event); iser_qp_event_callback() 508 ib_conn->qp = ib_conn->cma_id->qp; iser_create_ib_conn_res() 509 iser_info("setting conn %p cma_id %p qp %p\n", iser_create_ib_conn_res() 511 ib_conn->cma_id->qp); iser_create_ib_conn_res() 625 iser_info("freeing conn %p cma_id %p qp %p\n", iser_free_ib_conn_res() 626 iser_conn, ib_conn->cma_id, ib_conn->qp); iser_free_ib_conn_res() 628 if (ib_conn->qp != NULL) { iser_free_ib_conn_res() 631 ib_conn->qp = NULL; iser_free_ib_conn_res() 712 err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr); iser_conn_terminate() 835 (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); iser_connected_handler() 836 iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); iser_connected_handler() 1020 ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed); iser_post_recvl() 1049 ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed); iser_post_recvm() 1081 ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed); iser_post_send() 1161 ib_conn = wc->qp->qp_context; iser_handle_wc()
|
H A D | iscsi_iser.h | 411 * @qp: Connection Queue-pair 432 struct ib_qp *qp; member in struct:ib_conn
|
H A D | iser_memory.c | 693 ret = ib_post_send(ib_conn->qp, wr, &bad_wr); iser_reg_sig_mr() 769 ret = ib_post_send(ib_conn->qp, wr, &bad_wr); iser_fast_reg_mr()
|
/linux-4.1.27/include/linux/mlx4/ |
H A D | qp.h | 459 int sqd_event, struct mlx4_qp *qp); 461 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, 466 struct mlx4_qp *qp, enum mlx4_qp_state *qp_state); 473 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
|
H A D | device.h | 84 /* base qkey for use in sriov tunnel-qp/proxy-qp communication. 228 * bits 6 and 7 set in their qp number. 786 int qp; member in struct:mlx4_quotas 852 } __packed qp; member in union:mlx4_eqe::__anon12048 1069 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, 1071 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); 1082 int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1084 int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1086 int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 1089 int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
|
/linux-4.1.27/include/rdma/ |
H A D | ib_verbs.h | 419 struct ib_qp *qp; member in union:ib_event::__anon12871 701 struct ib_qp *qp; member in struct:ib_wc 785 /* Reserve a range for qp types internal to the low level driver. 786 * These qp types will not be visible at the IB core layer, so the 1406 struct ib_qp *qp; member in struct:ib_flow 1549 int (*modify_qp)(struct ib_qp *qp, 1553 int (*query_qp)(struct ib_qp *qp, 1557 int (*destroy_qp)(struct ib_qp *qp); 1558 int (*post_send)(struct ib_qp *qp, 1561 int (*post_recv)(struct ib_qp *qp, 1619 int (*bind_mw)(struct ib_qp *qp, 1631 int (*attach_mcast)(struct ib_qp *qp, 1634 int (*detach_mcast)(struct ib_qp *qp, 1648 struct ib_flow * (*create_flow)(struct ib_qp *qp, 1919 * @qp: The QP to modify. 1925 int ib_modify_qp(struct ib_qp *qp, 1932 * @qp: The QP to query. 1940 int ib_query_qp(struct ib_qp *qp, 1947 * @qp: The QP to destroy. 1949 int ib_destroy_qp(struct ib_qp *qp); 1963 * @qp: The QP handle to release 1968 int ib_close_qp(struct ib_qp *qp); 1973 * @qp: The QP to post the work request on. 1983 static inline int ib_post_send(struct ib_qp *qp, ib_post_send() argument 1987 return qp->device->post_send(qp, send_wr, bad_send_wr); ib_post_send() 1993 * @qp: The QP to post the work request on. 1998 static inline int ib_post_recv(struct ib_qp *qp, ib_post_recv() argument 2002 return qp->device->post_recv(qp, recv_wr, bad_recv_wr); ib_post_recv() 2541 * @qp: QP to post the bind work request on. 2550 static inline int ib_bind_mw(struct ib_qp *qp, ib_bind_mw() argument 2556 mw->device->bind_mw(qp, mw, mw_bind) : ib_bind_mw() 2607 * @qp: QP to attach to the multicast group. The QP must be type 2617 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2621 * @qp: QP to detach from the multicast group. 2625 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); 2639 struct ib_flow *ib_create_flow(struct ib_qp *qp,
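The inline ib_post_send()/ib_post_recv() wrappers above simply forward to the provider's function pointers. A minimal sketch of posting one receive work request through that interface; the DMA address and lkey are assumed to come from a previously registered memory region:

#include <rdma/ib_verbs.h>

static int example_post_one_recv(struct ib_qp *qp, u64 dma_addr,
				 u32 len, u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = wr_id,	/* echoed back in the completion */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}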
|
H A D | iw_cm.h | 105 void (*add_ref)(struct ib_qp *qp); 107 void (*rem_ref)(struct ib_qp *qp); 152 * @qp: The QP 159 void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
|
H A D | rdma_cm.h | 150 struct ib_qp *qp; member in struct:rdma_cm_id 293 * state of the qp associated with the id is modified to error, such that any
|
H A D | ib_mad.h | 350 * @qp: Reference to QP used for sending and receiving MADs. 368 struct ib_qp *qp; member in struct:ib_mad_agent 568 * @qp: Reference to a QP that requires MAD services. 582 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
|
/linux-4.1.27/net/9p/ |
H A D | trans_rdma.c | 68 * @qp: Queue Pair pointer 95 struct ib_qp *qp; member in struct:p9_trans_rdma 388 if (rdma->qp && !IS_ERR(rdma->qp)) rdma_destroy_trans() 389 ib_destroy_qp(rdma->qp); rdma_destroy_trans() 425 return ib_post_recv(rdma->qp, &wr, &bad_wr); post_recv() 529 err = ib_post_send(rdma->qp, &wr, &bad_wr); rdma_request() 746 rdma->qp = rdma->cm_id->qp; rdma_create_trans()
|
/linux-4.1.27/arch/sparc/kernel/ |
H A D | ds.c | 995 struct ds_queue_entry *qp, *tmp; process_ds_work() local 1003 list_for_each_entry_safe(qp, tmp, &todo, list) { process_ds_work() 1004 struct ds_data *dpkt = (struct ds_data *) qp->req; process_ds_work() 1005 struct ds_info *dp = qp->dp; process_ds_work() 1007 int req_len = qp->req_len; process_ds_work() 1021 list_del(&qp->list); process_ds_work() 1022 kfree(qp); process_ds_work() 1048 struct ds_queue_entry *qp; ds_data() local 1050 qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC); ds_data() 1051 if (!qp) { ds_data() 1054 qp->dp = dp; ds_data() 1055 memcpy(&qp->req, pkt, len); ds_data() 1056 list_add_tail(&qp->list, &ds_work_list); ds_data()
|
/linux-4.1.27/drivers/media/pci/solo6x10/ |
H A D | solo6x10-enc.c | 182 unsigned int qp) solo_s_jpeg_qp() 187 if ((ch > 31) || (qp > 3)) solo_s_jpeg_qp() 206 solo_dev->jpeg_qp[idx] |= (qp & 3) << ch; solo_s_jpeg_qp() 181 solo_s_jpeg_qp(struct solo_dev *solo_dev, unsigned int ch, unsigned int qp) solo_s_jpeg_qp() argument
|
H A D | solo6x10.h | 157 u8 mode, gop, qp, interlaced, interval; member in struct:solo_enc_dev 402 unsigned int qp);
|
H A D | solo6x10-v4l2-enc.c | 261 solo_reg_write(solo_dev, SOLO_VE_CH_QP(ch), solo_enc->qp); solo_enc_on() 266 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(ch), solo_enc->qp); solo_enc_on() 1097 solo_enc->qp = ctrl->val; solo_s_ctrl() 1098 solo_reg_write(solo_dev, SOLO_VE_CH_QP(solo_enc->ch), solo_enc->qp); solo_s_ctrl() 1099 solo_reg_write(solo_dev, SOLO_VE_CH_QP_E(solo_enc->ch), solo_enc->qp); solo_s_ctrl() 1283 solo_enc->qp = SOLO_DEFAULT_QP; solo_enc_alloc()
|
/linux-4.1.27/drivers/scsi/ |
H A D | qlogicpti.h | 504 #define for_each_qlogicpti(qp) \ 505 for((qp) = qptichain; (qp); (qp) = (qp)->next)
|
H A D | ncr53c8xx.c | 4430 struct list_head *qp; ncr_start_next_ccb() local 4437 qp = ncr_list_pop(&lp->wait_ccbq); ncr_start_next_ccb() 4438 if (!qp) ncr_start_next_ccb() 4441 cp = list_entry(qp, struct ccb, link_ccbq); ncr_start_next_ccb() 4442 list_add_tail(qp, &lp->busy_ccbq); ncr_start_next_ccb() 6546 struct list_head *qp; ncr_sir_to_redo() local 6560 qp = lp->busy_ccbq.prev; ncr_sir_to_redo() 6561 while (qp != &lp->busy_ccbq) { ncr_sir_to_redo() 6562 cp2 = list_entry(qp, struct ccb, link_ccbq); ncr_sir_to_redo() 6563 qp = qp->prev; ncr_sir_to_redo() 7156 struct list_head *qp; ncr_get_ccb() local 7172 qp = ncr_list_pop(&lp->free_ccbq); ncr_get_ccb() 7173 if (qp) { ncr_get_ccb() 7174 cp = list_entry(qp, struct ccb, link_ccbq); ncr_get_ccb() 7180 list_add_tail(qp, &lp->wait_ccbq); ncr_get_ccb()
|
/linux-4.1.27/drivers/infiniband/ulp/srp/ |
H A D | ib_srp.c | 260 struct ib_qp *qp) srp_init_qp() 281 ret = ib_modify_qp(qp, attr, srp_init_qp() 476 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE); srp_destroy_qp() 482 ret = ib_post_recv(ch->qp, &wr, &bad_wr); srp_destroy_qp() 488 ib_destroy_qp(ch->qp); srp_destroy_qp() 497 struct ib_qp *qp; srp_create_ch_ib() local 534 qp = ib_create_qp(dev->pd, init_attr); srp_create_ch_ib() 535 if (IS_ERR(qp)) { srp_create_ch_ib() 536 ret = PTR_ERR(qp); srp_create_ch_ib() 540 ret = srp_init_qp(target, qp); srp_create_ch_ib() 568 if (ch->qp) srp_create_ch_ib() 575 ch->qp = qp; srp_create_ch_ib() 583 ib_destroy_qp(qp); srp_create_ch_ib() 615 if (!ch->qp) srp_free_ch_ib() 637 ch->qp = NULL; srp_free_ch_ib() 722 req->param.qp_num = ch->qp->qp_num; srp_send_req() 723 req->param.qp_type = ch->qp->qp_type; srp_send_req() 1009 * back, or SRP_DLID_REDIRECT if we get a lid/qp srp_connect_ch() 1050 return ib_post_send(ch->qp, &wr, &bad_wr); srp_inv_rkey() 1331 return ib_post_send(ch->qp, &wr, &bad_wr); srp_map_finish_fr() 1701 return ib_post_send(ch->qp, &wr, &bad_wr); srp_post_send() 1719 return ib_post_recv(ch->qp, &wr, &bad_wr); srp_post_recv() 1747 rsp->tag, ch - target->ch, ch->qp->qp_num); srp_process_rsp() 2221 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); srp_cm_rep_handler() 2240 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); srp_cm_rep_handler() 259 srp_init_qp(struct srp_target_port *target, struct ib_qp *qp) srp_init_qp() argument
|
H A D | ib_srp.h | 147 struct ib_qp *qp; member in struct:srp_rdma_ch
|
/linux-4.1.27/arch/ia64/include/asm/ |
H A D | kprobes.h | 41 unsigned long long qp : 6; member in struct:cmp_inst::__anon1480
|
/linux-4.1.27/drivers/atm/ |
H A D | firestream.c | 631 static int qp; variable 651 pq[qp].cmd = cmd; submit_queue() 652 pq[qp].p0 = p1; submit_queue() 653 pq[qp].p1 = p2; submit_queue() 654 pq[qp].p2 = p3; submit_queue() 655 qp++; submit_queue() 656 if (qp >= 60) qp = 0; submit_queue() 1950 i, pq[qp].cmd, pq[qp].p0, pq[qp].p1, pq[qp].p2); firestream_remove_one() 1951 qp++; firestream_remove_one() 1952 if (qp >= 60) qp = 0; firestream_remove_one()
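In firestream.c above, qp is not a queue pair at all but the write index of a 60-entry command trace ring that the remove path replays. A standalone sketch of that logging scheme; the names are invented for illustration:

#include <linux/types.h>

#define TRACE_DEPTH 60			/* matches the bound in the listing */

struct trace_ent { u32 cmd, p0, p1, p2; };

static struct trace_ent tq[TRACE_DEPTH];
static int tqp;				/* wrapping write index */

static void trace_cmd(u32 cmd, u32 p0, u32 p1, u32 p2)
{
	tq[tqp].cmd = cmd;
	tq[tqp].p0 = p0;
	tq[tqp].p1 = p1;
	tq[tqp].p2 = p2;
	if (++tqp >= TRACE_DEPTH)
		tqp = 0;		/* oldest entry is overwritten next */
}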
|
/linux-4.1.27/net/sunrpc/xprtrdma/ |
H A D | frwr_ops.c | 237 rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr); frwr_op_map() 280 rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); frwr_op_unmap()
|
H A D | verbs.c | 447 ib_query_qp(ia->ri_id->qp, attr, rpcrdma_conn_upcall() 706 if (ia->ri_id->qp) rpcrdma_ia_close() 859 if (ia->ri_id->qp) { rpcrdma_ep_destroy() 862 ia->ri_id->qp = NULL; rpcrdma_ep_destroy() 1245 rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); rpcrdma_retry_local_inv() 1615 rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail); rpcrdma_ep_post() 1644 rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail); rpcrdma_ep_post_recv()
|
H A D | svc_rdma_transport.c | 195 event->event, event->element.qp); qp_event_handler() 206 event->event, event->element.qp); qp_event_handler() 953 newxprt->sc_qp = newxprt->sc_cm_id->qp; svc_rdma_accept()
|
/linux-4.1.27/drivers/infiniband/ulp/isert/ |
H A D | ib_isert.c | 180 return cma_id->qp; isert_create_qp() 190 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id); isert_conn_setup_qp() 191 if (IS_ERR(isert_conn->qp)) { isert_conn_setup_qp() 192 ret = PTR_ERR(isert_conn->qp); isert_conn_setup_qp() 812 if (isert_conn->qp) { isert_connect_release() 813 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context; isert_connect_release() 816 ib_destroy_qp(isert_conn->qp); isert_connect_release() 830 struct isert_conn *isert_conn = cma_id->qp->qp_context; isert_connected_handler() 931 struct isert_conn *isert_conn = cma_id->qp->qp_context; isert_disconnected_handler() 960 struct isert_conn *isert_conn = cma_id->qp->qp_context; isert_connect_error() 1029 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, isert_post_recv() 1058 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed); isert_post_send() 1150 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail); isert_rdma_post_recvl() 2102 isert_conn = wc->qp->qp_context; isert_handle_wc() 2158 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, isert_post_response() 2637 ret = ib_post_send(isert_conn->qp, wr, &bad_wr); isert_fast_reg_mr() 2750 ret = ib_post_send(isert_conn->qp, wr, &bad_wr); isert_reg_sig_mr() 2954 rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); isert_put_datain() 2988 rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed); isert_get_dataout() 3368 if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) { isert_wait4flush()
|
H A D | ib_isert.h | 172 struct ib_qp *qp; member in struct:isert_conn
|
/linux-4.1.27/net/rds/ |
H A D | iw_cm.c |
  580  rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id, rds_iw_conn_shutdown()
  582  ic->i_cm_id ? ic->i_cm_id->qp : NULL); rds_iw_conn_shutdown()
  597  if (ic->i_cm_id->qp) { rds_iw_conn_shutdown()
  599  ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); rds_iw_conn_shutdown()
  629  if (ic->i_cm_id->qp) rds_iw_conn_shutdown()
|
H A D | ib_cm.c |
  116  ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER); rds_ib_tune_rnr()
  174  err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); rds_ib_cm_connect_complete()
  638  rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id, rds_ib_conn_shutdown()
  640  ic->i_cm_id ? ic->i_cm_id->qp : NULL); rds_ib_conn_shutdown()
  692  if (ic->i_cm_id->qp) rds_ib_conn_shutdown()
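The shutdown paths above (rds_ib_conn_shutdown() and its iWARP twin in iw_cm.c) force the QP into the error state so that all outstanding work requests flush back with errors, which the completion handlers expect during teardown. A minimal sketch:

    /* Sketch: drain a connection the way the RDS shutdown paths do --
     * push the QP to IB_QPS_ERR so pending work requests complete with
     * flush errors instead of lingering. */
    static void drain_qp(struct ib_qp *qp)
    {
            struct ib_qp_attr qp_attr = {
                    .qp_state = IB_QPS_ERR,
            };

            /* Only the state is being changed, hence IB_QP_STATE alone. */
            ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
    }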
|
H A D | ib_send.c |
  342  /* We expect errors as the qp is drained during shutdown */ rds_ib_send_cq_comp_handler()
  757  ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); rds_ib_xmit()
  854  ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr); rds_ib_xmit_atomic()
  991  ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); rds_ib_xmit_rdma()
|
H A D | ib_recv.c |
  381  ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); rds_ib_recv_refill()
  617  ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr); rds_ib_send_ack()
  975  /* We expect errors as the qp is drained during shutdown */ rds_poll_cq()
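The refill loop keeps receives posted ahead of the peer's sends, since an incoming message with no posted buffer is an error. Posting one receive, with the per-buffer DMA address, length and lkey assumed:

    /* Sketch of the idea behind rds_ib_recv_refill(): keep the receive
     * queue topped up so every incoming send lands in a posted buffer. */
    static int post_one_recv(struct ib_qp *qp, u64 dma_addr, u32 len, u32 lkey)
    {
            struct ib_sge sge = {
                    .addr   = dma_addr,
                    .length = len,
                    .lkey   = lkey,
            };
            struct ib_recv_wr wr = {
                    .sg_list = &sge,
                    .num_sge = 1,
            };
            struct ib_recv_wr *bad_wr;

            return ib_post_recv(qp, &wr, &bad_wr);
    }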
|
H A D | iw_rdma.c |
  725  ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr); rds_iw_rdma_build_fastreg()
  738  if (!ibmr->cm_id->qp || !ibmr->mr) rds_iw_rdma_fastreg_inv()
  748  ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr); rds_iw_rdma_fastreg_inv()
|
H A D | iw_recv.c |
  251  ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr); rds_iw_recv_refill()
  448  ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr); rds_iw_send_ack()
  803  /* We expect errors as the qp is drained during shutdown */ rds_poll_cq()
|
H A D | iw_send.c |
  294  /* We expect errors as the qp is drained during shutdown */ rds_iw_send_cq_comp_handler()
  747  ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); rds_iw_xmit()
  952  ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); rds_iw_xmit_rdma()
|
/linux-4.1.27/net/sched/ |
H A D | sch_api.c |
  142  struct Qdisc_ops *q, **qp; register_qdisc() local
  146  for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) register_qdisc()
  172  *qp = qops; register_qdisc()
  186  struct Qdisc_ops *q, **qp; unregister_qdisc() local
  190  for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) unregister_qdisc()
  194  *qp = q->next; unregister_qdisc()
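register_qdisc() and unregister_qdisc() walk the qdisc_base list with a pointer-to-pointer, so tail insertion and unlinking need no special case for the head node. A self-contained rendering of the idiom (the struct and function names are illustrative, not the scheduler's):

    #include <stddef.h>
    #include <string.h>

    struct ops {
            const char *id;
            struct ops *next;
    };

    static struct ops *base;

    static int register_ops(struct ops *ops)
    {
            struct ops *q, **qp;

            /* qp always points at the pointer that would reference q. */
            for (qp = &base; (q = *qp) != NULL; qp = &q->next)
                    if (!strcmp(q->id, ops->id))
                            return -1;      /* duplicate id */
            ops->next = NULL;
            *qp = ops;                      /* *qp is the tail's next pointer */
            return 0;
    }

    static int unregister_ops(struct ops *victim)
    {
            struct ops *q, **qp;

            for (qp = &base; (q = *qp) != NULL; qp = &q->next)
                    if (q == victim) {
                            *qp = q->next;  /* unlink without tracking "prev" */
                            return 0;
                    }
            return -1;
    }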
|
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/ |
H A D | file.c |
 2212  struct mfc_cq_sr *qp, *spuqp; __spufs_dma_info_read() local
 2221  qp = &info.dma_info_command_data[i]; __spufs_dma_info_read()
 2224  qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; __spufs_dma_info_read()
 2225  qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; __spufs_dma_info_read()
 2226  qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; __spufs_dma_info_read()
 2227  qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; __spufs_dma_info_read()
 2264  struct mfc_cq_sr *qp, *puqp; __spufs_proxydma_info_read() local
 2278  qp = &info.proxydma_info_command_data[i]; __spufs_proxydma_info_read()
 2281  qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; __spufs_proxydma_info_read()
 2282  qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; __spufs_proxydma_info_read()
 2283  qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; __spufs_proxydma_info_read()
 2284  qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; __spufs_proxydma_info_read()
|
/linux-4.1.27/drivers/infiniband/ulp/srpt/ |
H A D | ib_srpt.c |
  469  ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc, srpt_mad_recv_handler()
  830  ret = ib_post_send(ch->qp, &wr, &bad_wr); srpt_post_send()
  948  * Initializes the attributes of queue pair 'qp' by allowing local write,
  949  * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
  951  static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) srpt_init_ch_qp() argument
  966  ret = ib_modify_qp(qp, attr, srpt_init_ch_qp()
  977  * @qp: queue pair to change the state of.
  985  static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp) srpt_ch_qp_rtr() argument
  998  ret = ib_modify_qp(qp, &qp_attr, attr_mask); srpt_ch_qp_rtr()
 1007  * @qp: queue pair to change the state of.
 1015  static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp) srpt_ch_qp_rts() argument
 1028  ret = ib_modify_qp(qp, &qp_attr, attr_mask); srpt_ch_qp_rts()
 1042  return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE); srpt_ch_qp_err()
 2056  ch->qp = ib_create_qp(sdev->pd, qp_init); srpt_create_ch_ib()
 2057  if (IS_ERR(ch->qp)) { srpt_create_ch_ib()
 2058  ret = PTR_ERR(ch->qp); srpt_create_ch_ib()
 2076  ret = srpt_init_ch_qp(ch, ch->qp); srpt_create_ch_ib()
 2097  ib_destroy_qp(ch->qp); srpt_create_ch_ib()
 2108  ib_destroy_qp(ch->qp); srpt_destroy_ch_ib()
 2510  ret = srpt_ch_qp_rtr(ch, ch->qp); srpt_cm_req_recv()
 2562  rep_param->qp_num = ch->qp->qp_num; srpt_cm_req_recv()
 2643  ret = srpt_ch_qp_rts(ch, ch->qp); srpt_cm_rtu_recv()
 2824  ret = ib_post_send(ch->qp, &wr, &bad_wr); srpt_perform_rdmas()
 2837  ib_post_send(ch->qp, &wr, &bad_wr) != 0) { srpt_perform_rdmas()
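srpt drives the channel QP through the INIT -> RTR -> RTS ladder, letting ib_cm_init_qp_attr() compute the attributes and mask each transition requires, as srpt_ch_qp_rtr() and srpt_ch_qp_rts() do above. A sketch of one rung, with error handling trimmed:

    /* Sketch: the IB CM fills in the attributes a given transition
     * needs; the caller only picks the target state. */
    static int qp_to_state(struct ib_cm_id *cm_id, struct ib_qp *qp,
                           enum ib_qp_state state)
    {
            struct ib_qp_attr qp_attr;
            int attr_mask;
            int ret;

            qp_attr.qp_state = state;       /* IB_QPS_RTR or IB_QPS_RTS */
            ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &attr_mask);
            if (ret)
                    return ret;

            return ib_modify_qp(qp, &qp_attr, attr_mask);
    }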
|
H A D | ib_srpt.h |
  275  * @qp: IB queue pair used for communicating over this channel.
  306  struct ib_qp *qp; member in struct:srpt_rdma_ch
|
/linux-4.1.27/mm/ |
H A D | mempolicy.c |
  490  struct queue_pages *qp = walk->private; queue_pages_pte_range() local
  491  unsigned long flags = qp->flags; queue_pages_pte_range()
  514  if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) queue_pages_pte_range()
  518  migrate_page_add(page, qp->pagelist, flags); queue_pages_pte_range()
  530  struct queue_pages *qp = walk->private; queue_pages_hugetlb() local
  531  unsigned long flags = qp->flags; queue_pages_hugetlb()
  543  if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) queue_pages_hugetlb()
  548  isolate_huge_page(page, qp->pagelist); queue_pages_hugetlb()
  590  struct queue_pages *qp = walk->private; queue_pages_test_walk() local
  592  unsigned long flags = qp->flags; queue_pages_test_walk()
  605  if (qp->prev && qp->prev->vm_end < vma->vm_start) queue_pages_test_walk()
  609  qp->prev = vma; queue_pages_test_walk()
  641  struct queue_pages qp = { queue_pages_range() local
  652  .private = &qp, queue_pages_range()
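mempolicy.c bundles the page-walk parameters into struct queue_pages and passes the block through walk->private, so every callback shares one state record. A sketch of the carrier pattern follows; the field set is abbreviated from mempolicy.c and the three callbacks are assumed to exist as in the hits above.

    /* Sketch: one state block threaded through walk_page_range(). */
    struct queue_pages {
            struct list_head *pagelist;
            unsigned long flags;
            nodemask_t *nmask;
            struct vm_area_struct *prev;
    };

    static int walk_range(struct mm_struct *mm,
                          unsigned long start, unsigned long end,
                          struct queue_pages *qp)
    {
            struct mm_walk walk = {
                    .pmd_entry     = queue_pages_pte_range,
                    .hugetlb_entry = queue_pages_hugetlb,
                    .test_walk     = queue_pages_test_walk,
                    .mm            = mm,
                    .private       = qp,    /* callbacks read walk->private */
            };

            return walk_page_range(start, end, &walk);
    }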
|
/linux-4.1.27/drivers/scsi/pm8001/ |
H A D | pm8001_sas.c |
  216  uint32_t *qp = (uint32_t *)(((char *) pm8001_phy_control() local
  220  phy->invalid_dword_count = qp[0]; pm8001_phy_control()
  221  phy->running_disparity_error_count = qp[1]; pm8001_phy_control()
  222  phy->loss_of_dword_sync_count = qp[3]; pm8001_phy_control()
  223  phy->phy_reset_problem_count = qp[4]; pm8001_phy_control()
|
/linux-4.1.27/net/openvswitch/ |
H A D | flow.c |
  304  struct qtag_prefix *qp; parse_vlan() local
  313  qp = (struct qtag_prefix *) skb->data; parse_vlan()
  314  key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT); parse_vlan()
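parse_vlan() overlays a two-field struct on the 802.1Q tag at skb->data and folds the TCI into the flow key. A self-contained userspace sketch of the same extraction; the caller is assumed to point data at the start of the tag, and the mask values are reproduced here as constants rather than taken from if_vlan.h.

    #include <stdint.h>
    #include <arpa/inet.h>

    /* Sketch: the 802.1Q tag as it sits on the wire. */
    struct qtag_prefix {
            uint16_t eth_type;      /* ETH_P_8021Q, network byte order */
            uint16_t tci;           /* PCP | DEI | VLAN ID */
    };

    #define VLAN_VID_MASK   0x0fff  /* low 12 bits of the TCI */

    static uint16_t vlan_id_from_tag(const unsigned char *data)
    {
            const struct qtag_prefix *qp = (const struct qtag_prefix *)data;

            return ntohs(qp->tci) & VLAN_VID_MASK;
    }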
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/ |
H A D | cxgb4_uld.h | 218 struct cxgb4_range qp; member in struct:cxgb4_virt_res
|
/linux-4.1.27/drivers/net/ethernet/intel/i40e/ |
H A D | i40e_main.c |
 2802  u32 qp; i40e_vsi_configure_msix() local
 2808  qp = vsi->base_queue; i40e_vsi_configure_msix()
 2822  wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); i40e_vsi_configure_msix()
 2827  (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)| i40e_vsi_configure_msix()
 2831  wr32(hw, I40E_QINT_RQCTL(qp), val); i40e_vsi_configure_msix()
 2836  ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)| i40e_vsi_configure_msix()
 2845  wr32(hw, I40E_QINT_TQCTL(qp), val); i40e_vsi_configure_msix()
 2846  qp++; i40e_vsi_configure_msix()
 3689  u32 val, qp; i40e_vsi_free_irq() local
 3722  qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) i40e_vsi_free_irq()
 3728  while (qp != I40E_QUEUE_END_OF_LIST) { i40e_vsi_free_irq()
 3731  val = rd32(hw, I40E_QINT_RQCTL(qp)); i40e_vsi_free_irq()
 3741  wr32(hw, I40E_QINT_RQCTL(qp), val); i40e_vsi_free_irq()
 3743  val = rd32(hw, I40E_QINT_TQCTL(qp)); i40e_vsi_free_irq()
 3756  wr32(hw, I40E_QINT_TQCTL(qp), val); i40e_vsi_free_irq()
 3757  qp = next; i40e_vsi_free_irq()
 3764  qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) i40e_vsi_free_irq()
 3770  val = rd32(hw, I40E_QINT_RQCTL(qp)); i40e_vsi_free_irq()
 3779  wr32(hw, I40E_QINT_RQCTL(qp), val); i40e_vsi_free_irq()
 3781  val = rd32(hw, I40E_QINT_TQCTL(qp)); i40e_vsi_free_irq()
 3791  wr32(hw, I40E_QINT_TQCTL(qp), val); i40e_vsi_free_irq()
 7441  /* The assumption is that lan qp count will be the highest i40e_config_rss()
 7442  * qp count for any PF VSI that needs RSS. i40e_config_rss()
 7443  * If multiple VSIs need RSS support, all the qp counts i40e_config_rss()
 9449  /* one qp for PF, no queues for anything else */ i40e_determine_queue_usage()
 9467  /* one qp for PF */ i40e_determine_queue_usage()
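On i40e hardware the queues bound to an MSI-X vector form a linked list: PFINT_LNKLSTN names the first queue, each queue's RQCTL/TQCTL register names the next, and a sentinel terminates the chain. A sketch of the traversal i40e_vsi_free_irq() performs; the register and mask names are the driver's, but the helper itself is hypothetical and the per-queue detach logic is elided.

    /* Sketch: walk the hardware queue list hanging off one vector. */
    static void walk_vector_queues(struct i40e_hw *hw, int vector)
    {
            u32 val, qp, next;

            val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
            qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
                 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;

            while (qp != I40E_QUEUE_END_OF_LIST) {
                    val = rd32(hw, I40E_QINT_RQCTL(qp));
                    next = (val & I40E_QINT_RQCTL_NEXTQ_INDX_MASK)
                           >> I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT;
                    /* ... detach the queue from the vector here ... */
                    qp = next;      /* follow the hardware linked list */
            }
    }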
|
/linux-4.1.27/kernel/rcu/ |
H A D | tree_trace.c | 120 seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d", print_one_rcu_data()
|