Lines Matching refs:qp
81 struct mlx4_ib_qp qp; member
122 return container_of(mqp, struct mlx4_ib_sqp, qp); in to_msqp()
125 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
130 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
131 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
135 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
142 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
143 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
149 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
150 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
160 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_qp0() argument
167 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
168 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
174 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { in is_qp0()
183 static void *get_wqe(struct mlx4_ib_qp *qp, int offset) in get_wqe() argument
185 return mlx4_buf_offset(&qp->buf, offset); in get_wqe()
188 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) in get_recv_wqe() argument
190 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
193 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) in get_send_wqe() argument
195 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); in get_send_wqe()
207 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) in stamp_send_wqe() argument
217 if (qp->sq_max_wqes_per_wr > 1) { in stamp_send_wqe()
218 s = roundup(size, 1U << qp->sq.wqe_shift); in stamp_send_wqe()
220 ind = (i >> qp->sq.wqe_shift) + n; in stamp_send_wqe()
221 stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : in stamp_send_wqe()
223 buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
224 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); in stamp_send_wqe()
228 ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
237 static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) in post_nop_wqe() argument
244 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in post_nop_wqe()
247 if (qp->ibqp.qp_type == IB_QPT_UD) { in post_nop_wqe()
251 av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); in post_nop_wqe()
269 (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); in post_nop_wqe()
271 stamp_send_wqe(qp, n + qp->sq_spare_wqes, size); in post_nop_wqe()
275 static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind) in pad_wraparound() argument
277 unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); in pad_wraparound()
278 if (unlikely(s < qp->sq_max_wqes_per_wr)) { in pad_wraparound()
279 post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); in pad_wraparound()
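pad_wraparound (line 275) answers one question: do enough contiguous slots remain before the send-queue index wraps to hold a worst-case work request? If not, the leftover slots are consumed by a NOP so no WR ever straddles the wrap point. A toy sketch of that check, with made-up constants:

#include <stdio.h>

int main(void)
{
	unsigned wqe_cnt = 64;          /* SQ depth, power of two */
	unsigned max_wqes_per_wr = 4;   /* assumed worst-case basic blocks per WR */

	for (unsigned ind = 60; ind < 66; ind++) {
		/* contiguous slots left before the ring index wraps to slot 0 */
		unsigned remaining = wqe_cnt - (ind & (wqe_cnt - 1));

		if (remaining < max_wqes_per_wr)
			printf("ind=%u: pad %u slot(s) with a NOP\n", ind, remaining);
		else
			printf("ind=%u: ok, %u contiguous slots\n", ind, remaining);
	}
	return 0;
}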
285 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_qp_event() argument
288 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx4_ib_qp_event()
291 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; in mlx4_ib_qp_event()
295 event.element.qp = ibqp; in mlx4_ib_qp_event()
323 "on QP %06x\n", type, qp->qpn); in mlx4_ib_qp_event()
378 int is_user, int has_rq, struct mlx4_ib_qp *qp) in set_rq_size() argument
389 qp->rq.wqe_cnt = qp->rq.max_gs = 0; in set_rq_size()
395 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); in set_rq_size()
396 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); in set_rq_size()
397 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); in set_rq_size()
402 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
403 cap->max_recv_sge = qp->rq.max_gs; in set_rq_size()
405 cap->max_recv_wr = qp->rq.max_post = in set_rq_size()
406 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); in set_rq_size()
407 cap->max_recv_sge = min(qp->rq.max_gs, in set_rq_size()
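The receive-queue sizing at lines 395-397 rounds both the requested WR count and SGE count up to powers of two and derives the WQE stride from the scatter/gather capacity. A self-contained sketch of the same arithmetic; the helpers below are local stand-ins for the kernel's roundup_pow_of_two/ilog2, and the 16-byte data-segment size is an assumption used only for the example:

#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow2(uint32_t v)   /* round up to the next power of two */
{
	uint32_t r = 1;
	while (r < v)
		r <<= 1;
	return r;
}

static uint32_t ilog2_u32(uint32_t v)      /* floor(log2(v)) for v >= 1 */
{
	uint32_t l = 0;
	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	uint32_t max_recv_wr = 100, max_recv_sge = 3;
	uint32_t data_seg_size = 16;   /* assumed size of one scatter entry */

	uint32_t wqe_cnt   = roundup_pow2(max_recv_wr ? max_recv_wr : 1);   /* 128 */
	uint32_t max_gs    = roundup_pow2(max_recv_sge ? max_recv_sge : 1); /* 4   */
	uint32_t wqe_shift = ilog2_u32(max_gs * data_seg_size);             /* 6   */

	printf("wqe_cnt=%u max_gs=%u wqe_shift=%u\n", wqe_cnt, max_gs, wqe_shift);
	return 0;
}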
416 enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) in set_kernel_sq_size() argument
423 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + in set_kernel_sq_size()
438 send_wqe_overhead(type, qp->flags); in set_kernel_sq_size()
475 qp->sq_signal_bits && BITS_PER_LONG == 64 && in set_kernel_sq_size()
479 qp->sq.wqe_shift = ilog2(64); in set_kernel_sq_size()
481 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); in set_kernel_sq_size()
484 qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); in set_kernel_sq_size()
490 qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; in set_kernel_sq_size()
491 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * in set_kernel_sq_size()
492 qp->sq_max_wqes_per_wr + in set_kernel_sq_size()
493 qp->sq_spare_wqes); in set_kernel_sq_size()
495 if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) in set_kernel_sq_size()
498 if (qp->sq_max_wqes_per_wr <= 1) in set_kernel_sq_size()
501 ++qp->sq.wqe_shift; in set_kernel_sq_size()
504 qp->sq.max_gs = (min(dev->dev->caps.max_sq_desc_sz, in set_kernel_sq_size()
505 (qp->sq_max_wqes_per_wr << qp->sq.wqe_shift)) - in set_kernel_sq_size()
506 send_wqe_overhead(type, qp->flags)) / in set_kernel_sq_size()
509 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_kernel_sq_size()
510 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_kernel_sq_size()
511 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { in set_kernel_sq_size()
512 qp->rq.offset = 0; in set_kernel_sq_size()
513 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in set_kernel_sq_size()
515 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; in set_kernel_sq_size()
516 qp->sq.offset = 0; in set_kernel_sq_size()
519 cap->max_send_wr = qp->sq.max_post = in set_kernel_sq_size()
520 (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; in set_kernel_sq_size()
521 cap->max_send_sge = min(qp->sq.max_gs, in set_kernel_sq_size()
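Lines 509-516 lay both queues out in one buffer: the total size is simply RQ bytes plus SQ bytes, and whichever queue has the larger WQE stride is placed first (offset 0), which keeps both queues naturally aligned to their own stride. A hedged sketch of that layout decision:

#include <stdint.h>
#include <stdio.h>

struct layout {
	uint32_t buf_size;
	uint32_t rq_offset;
	uint32_t sq_offset;
};

/* Place the queue with the larger WQE stride at the start of the buffer. */
static struct layout qp_buf_layout(uint32_t rq_cnt, uint32_t rq_shift,
				   uint32_t sq_cnt, uint32_t sq_shift)
{
	struct layout l;
	uint32_t rq_bytes = rq_cnt << rq_shift;
	uint32_t sq_bytes = sq_cnt << sq_shift;

	l.buf_size = rq_bytes + sq_bytes;
	if (rq_shift > sq_shift) {
		l.rq_offset = 0;
		l.sq_offset = rq_bytes;
	} else {
		l.rq_offset = sq_bytes;
		l.sq_offset = 0;
	}
	return l;
}

int main(void)
{
	struct layout l = qp_buf_layout(128, 6, 256, 7);   /* example geometry */
	printf("buf_size=%u rq@%u sq@%u\n", l.buf_size, l.rq_offset, l.sq_offset);
	return 0;
}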
531 struct mlx4_ib_qp *qp, in set_user_sq_size() argument
541 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in set_user_sq_size()
542 qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
544 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_sq_size()
545 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_user_sq_size()
550 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in alloc_proxy_bufs() argument
554 qp->sqp_proxy_rcv = in alloc_proxy_bufs()
555 kmalloc(sizeof (struct mlx4_ib_buf) * qp->rq.wqe_cnt, in alloc_proxy_bufs()
557 if (!qp->sqp_proxy_rcv) in alloc_proxy_bufs()
559 for (i = 0; i < qp->rq.wqe_cnt; i++) { in alloc_proxy_bufs()
560 qp->sqp_proxy_rcv[i].addr = in alloc_proxy_bufs()
563 if (!qp->sqp_proxy_rcv[i].addr) in alloc_proxy_bufs()
565 qp->sqp_proxy_rcv[i].map = in alloc_proxy_bufs()
566 ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, in alloc_proxy_bufs()
569 if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { in alloc_proxy_bufs()
570 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
579 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in alloc_proxy_bufs()
582 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
584 kfree(qp->sqp_proxy_rcv); in alloc_proxy_bufs()
585 qp->sqp_proxy_rcv = NULL; in alloc_proxy_bufs()
589 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in free_proxy_bufs() argument
593 for (i = 0; i < qp->rq.wqe_cnt; i++) { in free_proxy_bufs()
594 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in free_proxy_bufs()
597 kfree(qp->sqp_proxy_rcv[i].addr); in free_proxy_bufs()
599 kfree(qp->sqp_proxy_rcv); in free_proxy_bufs()
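alloc_proxy_bufs and free_proxy_bufs (lines 550-599) follow the usual partial-unwind pattern: allocate and DMA-map one receive buffer per RQ entry, and on a mid-loop failure walk back over whatever was already set up before freeing the array itself. A generic, non-RDMA sketch of that error-path shape, with plain malloc standing in for the buffer-plus-mapping pair:

#include <stdlib.h>

struct buf { void *addr; };

/* Allocate n buffers; on failure release everything allocated so far. */
static struct buf *alloc_bufs(int n, size_t sz)
{
	struct buf *v = calloc(n, sizeof(*v));
	int i;

	if (!v)
		return NULL;
	for (i = 0; i < n; i++) {
		v[i].addr = malloc(sz);
		if (!v[i].addr)
			goto err;
	}
	return v;

err:
	while (--i >= 0)        /* unwind only the entries already set up */
		free(v[i].addr);
	free(v);
	return NULL;
}

int main(void)
{
	struct buf *v = alloc_bufs(8, 256);

	if (v) {
		for (int i = 0; i < 8; i++)
			free(v[i].addr);
		free(v);
	}
	return 0;
}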
628 struct mlx4_ib_qp *qp; in create_qp_common() local
681 qp = &sqp->qp; in create_qp_common()
682 qp->pri.vid = 0xFFFF; in create_qp_common()
683 qp->alt.vid = 0xFFFF; in create_qp_common()
685 qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp); in create_qp_common()
686 if (!qp) in create_qp_common()
688 qp->pri.vid = 0xFFFF; in create_qp_common()
689 qp->alt.vid = 0xFFFF; in create_qp_common()
692 qp = *caller_qp; in create_qp_common()
694 qp->mlx4_ib_qp_type = qp_type; in create_qp_common()
696 mutex_init(&qp->mutex); in create_qp_common()
697 spin_lock_init(&qp->sq.lock); in create_qp_common()
698 spin_lock_init(&qp->rq.lock); in create_qp_common()
699 INIT_LIST_HEAD(&qp->gid_list); in create_qp_common()
700 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_common()
702 qp->state = IB_QPS_RESET; in create_qp_common()
704 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); in create_qp_common()
706 err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, qp_has_rq(init_attr), qp); in create_qp_common()
718 qp->sq_no_prefetch = ucmd.sq_no_prefetch; in create_qp_common()
720 err = set_user_sq_size(dev, qp, &ucmd); in create_qp_common()
724 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in create_qp_common()
725 qp->buf_size, 0, 0); in create_qp_common()
726 if (IS_ERR(qp->umem)) { in create_qp_common()
727 err = PTR_ERR(qp->umem); in create_qp_common()
731 err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem), in create_qp_common()
732 ilog2(qp->umem->page_size), &qp->mtt); in create_qp_common()
736 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_qp_common()
742 ucmd.db_addr, &qp->db); in create_qp_common()
747 qp->sq_no_prefetch = 0; in create_qp_common()
750 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; in create_qp_common()
753 qp->flags |= MLX4_IB_QP_LSO; in create_qp_common()
758 qp->flags |= MLX4_IB_QP_NETIF; in create_qp_common()
763 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); in create_qp_common()
768 err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp); in create_qp_common()
772 *qp->db.db = 0; in create_qp_common()
775 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) { in create_qp_common()
780 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, in create_qp_common()
781 &qp->mtt); in create_qp_common()
785 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp); in create_qp_common()
789 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof (u64), gfp); in create_qp_common()
790 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof (u64), gfp); in create_qp_common()
791 if (!qp->sq.wrid || !qp->rq.wrid) { in create_qp_common()
798 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in create_qp_common()
800 if (alloc_proxy_bufs(pd->device, qp)) { in create_qp_common()
816 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
825 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); in create_qp_common()
830 qp->mqp.qpn |= (1 << 23); in create_qp_common()
837 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
839 qp->mqp.event = mlx4_ib_qp_event; in create_qp_common()
841 *caller_qp = qp; in create_qp_common()
849 list_add_tail(&qp->qps_list, &dev->qp_list); in create_qp_common()
854 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); in create_qp_common()
856 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); in create_qp_common()
864 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
870 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in create_qp_common()
871 free_proxy_bufs(pd->device, qp); in create_qp_common()
875 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db); in create_qp_common()
877 kfree(qp->sq.wrid); in create_qp_common()
878 kfree(qp->rq.wrid); in create_qp_common()
882 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_qp_common()
886 ib_umem_release(qp->umem); in create_qp_common()
888 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in create_qp_common()
892 mlx4_db_free(dev->dev, &qp->db); in create_qp_common()
896 kfree(qp); in create_qp_common()
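One detail worth calling out in create_qp_common is line 837: the doorbell value is the QPN shifted into position and byte-swapped once at create time, so the post_send hot path can writel() it (line 2933) without converting on every ring. A stand-alone sketch of that precomputation, using a local swab32 helper and a made-up QPN:

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t v)
{
	return ((v & 0x000000ffU) << 24) |
	       ((v & 0x0000ff00U) << 8)  |
	       ((v & 0x00ff0000U) >> 8)  |
	       ((v & 0xff000000U) >> 24);
}

int main(void)
{
	uint32_t qpn = 0x0000ab;             /* example QP number */
	uint32_t db  = swab32(qpn << 8);     /* value later written with writel() */

	printf("qpn=0x%06x doorbell=0x%08x\n", qpn, db);
	return 0;
}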
944 static void del_gid_entries(struct mlx4_ib_qp *qp) in del_gid_entries() argument
948 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in del_gid_entries()
954 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) in get_pd() argument
956 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) in get_pd()
957 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); in get_pd()
959 return to_mpd(qp->ibqp.pd); in get_pd()
962 static void get_cqs(struct mlx4_ib_qp *qp, in get_cqs() argument
965 switch (qp->ibqp.qp_type) { in get_cqs()
967 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); in get_cqs()
971 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
975 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
976 *recv_cq = to_mcq(qp->ibqp.recv_cq); in get_cqs()
981 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, in destroy_qp_common() argument
987 if (qp->state != IB_QPS_RESET) { in destroy_qp_common()
988 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_common()
989 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_common()
991 qp->mqp.qpn); in destroy_qp_common()
992 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in destroy_qp_common()
993 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in destroy_qp_common()
994 qp->pri.smac = 0; in destroy_qp_common()
995 qp->pri.smac_port = 0; in destroy_qp_common()
997 if (qp->alt.smac) { in destroy_qp_common()
998 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in destroy_qp_common()
999 qp->alt.smac = 0; in destroy_qp_common()
1001 if (qp->pri.vid < 0x1000) { in destroy_qp_common()
1002 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in destroy_qp_common()
1003 qp->pri.vid = 0xFFFF; in destroy_qp_common()
1004 qp->pri.candidate_vid = 0xFFFF; in destroy_qp_common()
1005 qp->pri.update_vid = 0; in destroy_qp_common()
1007 if (qp->alt.vid < 0x1000) { in destroy_qp_common()
1008 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in destroy_qp_common()
1009 qp->alt.vid = 0xFFFF; in destroy_qp_common()
1010 qp->alt.candidate_vid = 0xFFFF; in destroy_qp_common()
1011 qp->alt.update_vid = 0; in destroy_qp_common()
1015 get_cqs(qp, &send_cq, &recv_cq); in destroy_qp_common()
1021 list_del(&qp->qps_list); in destroy_qp_common()
1022 list_del(&qp->cq_send_list); in destroy_qp_common()
1023 list_del(&qp->cq_recv_list); in destroy_qp_common()
1025 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1026 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); in destroy_qp_common()
1028 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1031 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_common()
1036 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_common()
1038 if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { in destroy_qp_common()
1039 if (qp->flags & MLX4_IB_QP_NETIF) in destroy_qp_common()
1040 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); in destroy_qp_common()
1042 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_common()
1045 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in destroy_qp_common()
1048 if (qp->rq.wqe_cnt) in destroy_qp_common()
1049 mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context), in destroy_qp_common()
1050 &qp->db); in destroy_qp_common()
1051 ib_umem_release(qp->umem); in destroy_qp_common()
1053 kfree(qp->sq.wrid); in destroy_qp_common()
1054 kfree(qp->rq.wrid); in destroy_qp_common()
1055 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in destroy_qp_common()
1057 free_proxy_bufs(&dev->ib_dev, qp); in destroy_qp_common()
1058 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in destroy_qp_common()
1059 if (qp->rq.wqe_cnt) in destroy_qp_common()
1060 mlx4_db_free(dev->dev, &qp->db); in destroy_qp_common()
1063 del_gid_entries(qp); in destroy_qp_common()
1087 struct mlx4_ib_qp *qp = NULL; in mlx4_ib_create_qp() local
1133 qp = kzalloc(sizeof *qp, gfp); in mlx4_ib_create_qp()
1134 if (!qp) in mlx4_ib_create_qp()
1136 qp->pri.vid = 0xFFFF; in mlx4_ib_create_qp()
1137 qp->alt.vid = 0xFFFF; in mlx4_ib_create_qp()
1142 udata, 0, &qp, gfp); in mlx4_ib_create_qp()
1146 qp->ibqp.qp_num = qp->mqp.qpn; in mlx4_ib_create_qp()
1147 qp->xrcdn = xrcdn; in mlx4_ib_create_qp()
1160 &qp, gfp); in mlx4_ib_create_qp()
1164 qp->port = init_attr->port_num; in mlx4_ib_create_qp()
1165 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1; in mlx4_ib_create_qp()
1174 return &qp->ibqp; in mlx4_ib_create_qp()
1177 int mlx4_ib_destroy_qp(struct ib_qp *qp) in mlx4_ib_destroy_qp() argument
1179 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_destroy_qp()
1180 struct mlx4_ib_qp *mqp = to_mqp(qp); in mlx4_ib_destroy_qp()
1227 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, in to_mlx4_access_flags() argument
1237 dest_rd_atomic = qp->resp_depth; in to_mlx4_access_flags()
1242 access_flags = qp->atomic_rd_en; in to_mlx4_access_flags()
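to_mlx4_access_flags (lines 1227-1242) shows the usual modify-QP convention: a value is taken from the new attributes only when its attr_mask bit is set, otherwise the value already cached on the QP (resp_depth, atomic_rd_en) is reused. A small sketch of that mask-or-cached selection, with invented flag names:

#include <stdio.h>

#define ATTR_MAX_DEST_RD_ATOMIC (1 << 0)   /* illustrative mask bits */
#define ATTR_ACCESS_FLAGS       (1 << 1)

struct attrs    { int max_dest_rd_atomic; int access_flags; };
struct qp_state { int resp_depth;         int atomic_rd_en; };

int main(void)
{
	struct qp_state qp   = { .resp_depth = 4,  .atomic_rd_en = 0x3 };
	struct attrs    attr = { .max_dest_rd_atomic = 16, .access_flags = 0x7 };
	int mask = ATTR_ACCESS_FLAGS;   /* only the access flags are being changed */

	int depth = (mask & ATTR_MAX_DEST_RD_ATOMIC) ? attr.max_dest_rd_atomic
						     : qp.resp_depth;
	int flags = (mask & ATTR_ACCESS_FLAGS) ? attr.access_flags
					       : qp.atomic_rd_en;

	printf("depth=%d flags=0x%x\n", depth, flags);
	return 0;
}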
1387 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, in mlx4_set_path() argument
1392 return _mlx4_set_path(dev, &qp->ah_attr, in mlx4_set_path()
1393 mlx4_mac_to_u64((u8 *)qp->smac), in mlx4_set_path()
1394 (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff, in mlx4_set_path()
1399 const struct ib_qp_attr *qp, in mlx4_set_alt_path() argument
1404 return _mlx4_set_path(dev, &qp->alt_ah_attr, in mlx4_set_alt_path()
1405 mlx4_mac_to_u64((u8 *)qp->alt_smac), in mlx4_set_alt_path()
1407 qp->alt_vlan_id : 0xffff, in mlx4_set_alt_path()
1411 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in update_mcg_macs() argument
1415 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in update_mcg_macs()
1416 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { in update_mcg_macs()
1418 ge->port = qp->port; in update_mcg_macs()
1423 static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac, in handle_eth_ud_smac_index() argument
1429 u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); in handle_eth_ud_smac_index()
1431 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); in handle_eth_ud_smac_index()
1432 if (!qp->pri.smac && !qp->pri.smac_port) { in handle_eth_ud_smac_index()
1433 smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); in handle_eth_ud_smac_index()
1435 qp->pri.candidate_smac_index = smac_index; in handle_eth_ud_smac_index()
1436 qp->pri.candidate_smac = u64_mac; in handle_eth_ud_smac_index()
1437 qp->pri.candidate_smac_port = qp->port; in handle_eth_ud_smac_index()
1451 struct mlx4_ib_qp *qp = to_mqp(ibqp); in __mlx4_ib_modify_qp() local
1462 rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
1471 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); in __mlx4_ib_modify_qp()
1495 if (qp->flags & MLX4_IB_QP_LSO) in __mlx4_ib_modify_qp()
1510 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
1511 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
1512 context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mlx4_ib_modify_qp()
1514 if (qp->sq.wqe_cnt) in __mlx4_ib_modify_qp()
1515 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
1516 context->sq_size_stride |= qp->sq.wqe_shift - 4; in __mlx4_ib_modify_qp()
1519 context->sq_size_stride |= !!qp->sq_no_prefetch << 7; in __mlx4_ib_modify_qp()
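Lines 1510-1519 pack each queue's geometry into one context byte: log2 of the entry count in the upper bits, the stride exponent minus four in the low three bits, and (for the SQ only) the no-prefetch flag in bit 7. A hedged sketch of that encoding; the bit positions are inferred from the shifts shown above:

#include <stdint.h>
#include <stdio.h>

static uint32_t ilog2_u32(uint32_t v)
{
	uint32_t l = 0;
	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	uint32_t rq_wqe_cnt = 128, rq_wqe_shift = 6;   /* 64-byte RQ stride  */
	uint32_t sq_wqe_cnt = 256, sq_wqe_shift = 7;   /* 128-byte SQ stride */
	int sq_no_prefetch = 1;

	/* log2(depth) << 3, OR'ed with (log2(stride) - 4) in the low bits */
	uint8_t rq_size_stride = (ilog2_u32(rq_wqe_cnt) << 3) | (rq_wqe_shift - 4);
	/* the SQ byte additionally carries the no-prefetch flag in bit 7 */
	uint8_t sq_size_stride = (ilog2_u32(sq_wqe_cnt) << 3) | (sq_wqe_shift - 4)
				 | (!!sq_no_prefetch << 7);

	printf("rq=0x%02x sq=0x%02x\n", rq_size_stride, sq_size_stride);
	return 0;
}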
1520 context->xrcd = cpu_to_be32((u32) qp->xrcdn); in __mlx4_ib_modify_qp()
1525 if (qp->ibqp.uobject) in __mlx4_ib_modify_qp()
1542 if (dev->counters[qp->port - 1] != -1) { in __mlx4_ib_modify_qp()
1544 dev->counters[qp->port - 1]; in __mlx4_ib_modify_qp()
1549 if (qp->flags & MLX4_IB_QP_NETIF) { in __mlx4_ib_modify_qp()
1550 mlx4_ib_steer_qp_reg(dev, qp, 1); in __mlx4_ib_modify_qp()
1556 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
1563 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, in __mlx4_ib_modify_qp()
1565 attr->port_num : qp->port)) in __mlx4_ib_modify_qp()
1586 if (mlx4_set_alt_path(dev, attr, attr_mask, qp, in __mlx4_ib_modify_qp()
1596 pd = get_pd(qp); in __mlx4_ib_modify_qp()
1597 get_cqs(qp, &send_cq, &recv_cq); in __mlx4_ib_modify_qp()
1604 if (!qp->ibqp.uobject) in __mlx4_ib_modify_qp()
1635 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); in __mlx4_ib_modify_qp()
1651 if (qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
1656 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && in __mlx4_ib_modify_qp()
1673 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) in __mlx4_ib_modify_qp()
1674 context->db_rec_addr = cpu_to_be64(qp->db.dma); in __mlx4_ib_modify_qp()
1681 context->pri_path.sched_queue = (qp->port - 1) << 6; in __mlx4_ib_modify_qp()
1682 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in __mlx4_ib_modify_qp()
1683 qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
1686 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) in __mlx4_ib_modify_qp()
1689 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
1693 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
1695 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || in __mlx4_ib_modify_qp()
1696 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) in __mlx4_ib_modify_qp()
1699 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || in __mlx4_ib_modify_qp()
1700 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || in __mlx4_ib_modify_qp()
1701 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { in __mlx4_ib_modify_qp()
1702 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); in __mlx4_ib_modify_qp()
1707 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in __mlx4_ib_modify_qp()
1708 dev->qp1_proxy[qp->port - 1] = qp; in __mlx4_ib_modify_qp()
1713 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { in __mlx4_ib_modify_qp()
1725 &dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
1753 for (i = 0; i < qp->sq.wqe_cnt; ++i) { in __mlx4_ib_modify_qp()
1754 ctrl = get_send_wqe(qp, i); in __mlx4_ib_modify_qp()
1756 if (qp->sq_max_wqes_per_wr == 1) in __mlx4_ib_modify_qp()
1757 ctrl->fence_size = 1 << (qp->sq.wqe_shift - 4); in __mlx4_ib_modify_qp()
1759 stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); in __mlx4_ib_modify_qp()
1763 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), in __mlx4_ib_modify_qp()
1765 sqd_event, &qp->mqp); in __mlx4_ib_modify_qp()
1769 qp->state = new_state; in __mlx4_ib_modify_qp()
1772 qp->atomic_rd_en = attr->qp_access_flags; in __mlx4_ib_modify_qp()
1774 qp->resp_depth = attr->max_dest_rd_atomic; in __mlx4_ib_modify_qp()
1776 qp->port = attr->port_num; in __mlx4_ib_modify_qp()
1777 update_mcg_macs(dev, qp); in __mlx4_ib_modify_qp()
1780 qp->alt_port = attr->alt_port_num; in __mlx4_ib_modify_qp()
1782 if (is_sqp(dev, qp)) in __mlx4_ib_modify_qp()
1783 store_sqp_attrs(to_msqp(qp), attr, attr_mask); in __mlx4_ib_modify_qp()
1789 if (is_qp0(dev, qp)) { in __mlx4_ib_modify_qp()
1791 if (mlx4_INIT_PORT(dev->dev, qp->port)) in __mlx4_ib_modify_qp()
1793 qp->port); in __mlx4_ib_modify_qp()
1797 mlx4_CLOSE_PORT(dev->dev, qp->port); in __mlx4_ib_modify_qp()
1806 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx4_ib_modify_qp()
1809 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx4_ib_modify_qp()
1811 qp->rq.head = 0; in __mlx4_ib_modify_qp()
1812 qp->rq.tail = 0; in __mlx4_ib_modify_qp()
1813 qp->sq.head = 0; in __mlx4_ib_modify_qp()
1814 qp->sq.tail = 0; in __mlx4_ib_modify_qp()
1815 qp->sq_next_wqe = 0; in __mlx4_ib_modify_qp()
1816 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
1817 *qp->db.db = 0; in __mlx4_ib_modify_qp()
1819 if (qp->flags & MLX4_IB_QP_NETIF) in __mlx4_ib_modify_qp()
1820 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
1822 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in __mlx4_ib_modify_qp()
1823 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
1824 qp->pri.smac = 0; in __mlx4_ib_modify_qp()
1825 qp->pri.smac_port = 0; in __mlx4_ib_modify_qp()
1827 if (qp->alt.smac) { in __mlx4_ib_modify_qp()
1828 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
1829 qp->alt.smac = 0; in __mlx4_ib_modify_qp()
1831 if (qp->pri.vid < 0x1000) { in __mlx4_ib_modify_qp()
1832 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in __mlx4_ib_modify_qp()
1833 qp->pri.vid = 0xFFFF; in __mlx4_ib_modify_qp()
1834 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
1835 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
1838 if (qp->alt.vid < 0x1000) { in __mlx4_ib_modify_qp()
1839 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in __mlx4_ib_modify_qp()
1840 qp->alt.vid = 0xFFFF; in __mlx4_ib_modify_qp()
1841 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
1842 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
1847 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
1849 if (qp->pri.candidate_smac || in __mlx4_ib_modify_qp()
1850 (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { in __mlx4_ib_modify_qp()
1852 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); in __mlx4_ib_modify_qp()
1854 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) in __mlx4_ib_modify_qp()
1855 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
1856 qp->pri.smac = qp->pri.candidate_smac; in __mlx4_ib_modify_qp()
1857 qp->pri.smac_index = qp->pri.candidate_smac_index; in __mlx4_ib_modify_qp()
1858 qp->pri.smac_port = qp->pri.candidate_smac_port; in __mlx4_ib_modify_qp()
1860 qp->pri.candidate_smac = 0; in __mlx4_ib_modify_qp()
1861 qp->pri.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
1862 qp->pri.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
1864 if (qp->alt.candidate_smac) { in __mlx4_ib_modify_qp()
1866 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); in __mlx4_ib_modify_qp()
1868 if (qp->alt.smac) in __mlx4_ib_modify_qp()
1869 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
1870 qp->alt.smac = qp->alt.candidate_smac; in __mlx4_ib_modify_qp()
1871 qp->alt.smac_index = qp->alt.candidate_smac_index; in __mlx4_ib_modify_qp()
1872 qp->alt.smac_port = qp->alt.candidate_smac_port; in __mlx4_ib_modify_qp()
1874 qp->alt.candidate_smac = 0; in __mlx4_ib_modify_qp()
1875 qp->alt.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
1876 qp->alt.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
1879 if (qp->pri.update_vid) { in __mlx4_ib_modify_qp()
1881 if (qp->pri.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
1882 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, in __mlx4_ib_modify_qp()
1883 qp->pri.candidate_vid); in __mlx4_ib_modify_qp()
1885 if (qp->pri.vid < 0x1000) in __mlx4_ib_modify_qp()
1886 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, in __mlx4_ib_modify_qp()
1887 qp->pri.vid); in __mlx4_ib_modify_qp()
1888 qp->pri.vid = qp->pri.candidate_vid; in __mlx4_ib_modify_qp()
1889 qp->pri.vlan_port = qp->pri.candidate_vlan_port; in __mlx4_ib_modify_qp()
1890 qp->pri.vlan_index = qp->pri.candidate_vlan_index; in __mlx4_ib_modify_qp()
1892 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
1893 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
1896 if (qp->alt.update_vid) { in __mlx4_ib_modify_qp()
1898 if (qp->alt.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
1899 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, in __mlx4_ib_modify_qp()
1900 qp->alt.candidate_vid); in __mlx4_ib_modify_qp()
1902 if (qp->alt.vid < 0x1000) in __mlx4_ib_modify_qp()
1903 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, in __mlx4_ib_modify_qp()
1904 qp->alt.vid); in __mlx4_ib_modify_qp()
1905 qp->alt.vid = qp->alt.candidate_vid; in __mlx4_ib_modify_qp()
1906 qp->alt.vlan_port = qp->alt.candidate_vlan_port; in __mlx4_ib_modify_qp()
1907 qp->alt.vlan_index = qp->alt.candidate_vlan_index; in __mlx4_ib_modify_qp()
1909 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
1910 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
1920 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_modify_qp() local
1924 mutex_lock(&qp->mutex); in mlx4_ib_modify_qp()
1926 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in mlx4_ib_modify_qp()
1932 int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in mlx4_ib_modify_qp()
1977 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in mlx4_ib_modify_qp()
2016 mutex_unlock(&qp->mutex); in mlx4_ib_modify_qp()
2037 struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); in build_sriov_qp0_header()
2059 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) in build_sriov_qp0_header()
2064 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { in build_sriov_qp0_header()
2081 ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); in build_sriov_qp0_header()
2083 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) in build_sriov_qp0_header()
2087 cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); in build_sriov_qp0_header()
2091 if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
2094 if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
2098 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); in build_sriov_qp0_header()
2159 struct ib_device *ib_dev = sqp->qp.ibqp.device; in build_mlx_header()
2180 is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; in build_mlx_header()
2228 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
2231 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
2246 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
2283 u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]); in build_mlx_header()
2302 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; in build_mlx_header()
2307 if (!sqp->qp.ibqp.qp_num) in build_mlx_header()
2308 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); in build_mlx_header()
2310 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); in build_mlx_header()
2316 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); in build_mlx_header()
2598 struct mlx4_ib_qp *qp, unsigned *lso_seg_len, in build_lso_seg() argument
2606 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && in build_lso_seg()
2607 wr->num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
2642 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_post_send() local
2660 spin_lock_irqsave(&qp->sq.lock, flags); in mlx4_ib_post_send()
2668 ind = qp->sq_next_wqe; in mlx4_ib_post_send()
2674 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mlx4_ib_post_send()
2680 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in mlx4_ib_post_send()
2686 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in mlx4_ib_post_send()
2687 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in mlx4_ib_post_send()
2697 qp->sq_signal_bits; in mlx4_ib_post_send()
2704 switch (qp->mlx4_ib_qp_type) { in mlx4_ib_post_send()
2775 err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); in mlx4_ib_post_send()
2798 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz, &blh); in mlx4_ib_post_send()
2810 err = build_sriov_qp0_header(to_msqp(qp), wr, ctrl, &seglen); in mlx4_ib_post_send()
2832 qp->mlx4_ib_qp_type); in mlx4_ib_post_send()
2842 err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen); in mlx4_ib_post_send()
2867 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in mlx4_ib_post_send()
2868 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || in mlx4_ib_post_send()
2869 qp->mlx4_ib_qp_type & in mlx4_ib_post_send()
2903 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; in mlx4_ib_post_send()
2905 stamp = ind + qp->sq_spare_wqes; in mlx4_ib_post_send()
2906 ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); in mlx4_ib_post_send()
2918 stamp_send_wqe(qp, stamp, size * 16); in mlx4_ib_post_send()
2919 ind = pad_wraparound(qp, ind); in mlx4_ib_post_send()
2925 qp->sq.head += nreq; in mlx4_ib_post_send()
2933 writel(qp->doorbell_qpn, in mlx4_ib_post_send()
2942 stamp_send_wqe(qp, stamp, size * 16); in mlx4_ib_post_send()
2944 ind = pad_wraparound(qp, ind); in mlx4_ib_post_send()
2945 qp->sq_next_wqe = ind; in mlx4_ib_post_send()
2948 spin_unlock_irqrestore(&qp->sq.lock, flags); in mlx4_ib_post_send()
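The expression at line 2903, ind & qp->sq.wqe_cnt, is the ownership toggle: because ind is a free-running producer index and wqe_cnt is a power of two, the tested bit flips each time the producer completes a lap of the ring, which is how entries from the current lap are told apart from leftovers of the previous one. A toy illustration of the toggle:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wqe_cnt = 8;   /* power-of-two SQ depth */

	/* The owner bit flips every wqe_cnt postings of the free-running index. */
	for (uint32_t ind = 6; ind < 12; ind++) {
		uint32_t owner = (ind & wqe_cnt) ? (1u << 31) : 0;

		printf("ind=%u slot=%u owner_bit=%u\n",
		       ind, ind & (wqe_cnt - 1), owner >> 31);
	}
	return 0;
}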
2956 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_post_recv() local
2966 max_gs = qp->rq.max_gs; in mlx4_ib_post_recv()
2967 spin_lock_irqsave(&qp->rq.lock, flags); in mlx4_ib_post_recv()
2976 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx4_ib_post_recv()
2979 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mlx4_ib_post_recv()
2985 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx4_ib_post_recv()
2991 scat = get_recv_wqe(qp, ind); in mlx4_ib_post_recv()
2993 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in mlx4_ib_post_recv()
2996 qp->sqp_proxy_rcv[ind].map, in mlx4_ib_post_recv()
3003 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); in mlx4_ib_post_recv()
3017 qp->rq.wrid[ind] = wr->wr_id; in mlx4_ib_post_recv()
3019 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx4_ib_post_recv()
3024 qp->rq.head += nreq; in mlx4_ib_post_recv()
3032 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in mlx4_ib_post_recv()
3035 spin_unlock_irqrestore(&qp->rq.lock, flags); in mlx4_ib_post_recv()
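The receive side (lines 2976-3032) uses the same power-of-two masking for its slot index and, once all WRs are posted, records the low 16 bits of the head counter in the doorbell record. A minimal sketch of that index arithmetic, with example numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wqe_cnt = 8;    /* RQ depth, must be a power of two */
	uint32_t head    = 6;    /* free-running producer counter */
	int nreq         = 4;    /* work requests posted in this call */

	for (int i = 0; i < nreq; i++) {
		uint32_t ind = (head + i) & (wqe_cnt - 1);   /* slot index wraps */
		printf("wr %d -> slot %u\n", i, ind);
	}
	head += nreq;
	printf("doorbell record holds 0x%04x\n", head & 0xffff);
	return 0;
}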
3119 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_query_qp() local
3124 mutex_lock(&qp->mutex); in mlx4_ib_query_qp()
3126 if (qp->state == IB_QPS_RESET) { in mlx4_ib_query_qp()
3131 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); in mlx4_ib_query_qp()
3139 qp->state = to_ib_qp_state(mlx4_state); in mlx4_ib_query_qp()
3140 qp_attr->qp_state = qp->state; in mlx4_ib_query_qp()
3151 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { in mlx4_ib_query_qp()
3160 qp_attr->port_num = qp->port; in mlx4_ib_query_qp()
3180 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx4_ib_query_qp()
3181 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx4_ib_query_qp()
3184 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; in mlx4_ib_query_qp()
3185 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx4_ib_query_qp()
3200 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) in mlx4_ib_query_qp()
3203 if (qp->flags & MLX4_IB_QP_LSO) in mlx4_ib_query_qp()
3206 if (qp->flags & MLX4_IB_QP_NETIF) in mlx4_ib_query_qp()
3210 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? in mlx4_ib_query_qp()
3214 mutex_unlock(&qp->mutex); in mlx4_ib_query_qp()