Lines Matching refs:qp

84 static void *get_wqe(struct mlx5_ib_qp *qp, int offset) in get_wqe() argument
86 return mlx5_buf_offset(&qp->buf, offset); in get_wqe()
89 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) in get_recv_wqe() argument
91 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
94 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) in mlx5_get_send_wqe() argument
96 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); in mlx5_get_send_wqe()
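
The three accessors above turn a WQE index into an address inside the QP buffer: the queue's byte offset plus the index shifted by the queue's stride (the RQ uses its own wqe_shift, the SQ uses MLX5_IB_SQ_STRIDE). A minimal userspace sketch of the same arithmetic, with a plain char array standing in for mlx5_buf and made-up offsets and shift values:

    #include <stdio.h>

    /* Toy QP buffer layout: RQ region first, SQ region after it.
     * The shifts and offsets are illustrative, not the driver's values. */
    struct toy_wq { size_t offset; unsigned wqe_shift; };
    struct toy_qp { char buf[1 << 16]; struct toy_wq rq, sq; };

    static void *toy_get_wqe(struct toy_qp *qp, size_t offset)
    {
        return qp->buf + offset;        /* plays the role of mlx5_buf_offset() */
    }

    static void *toy_get_recv_wqe(struct toy_qp *qp, int n)
    {
        return toy_get_wqe(qp, qp->rq.offset + ((size_t)n << qp->rq.wqe_shift));
    }

    static void *toy_get_send_wqe(struct toy_qp *qp, int n)
    {
        return toy_get_wqe(qp, qp->sq.offset + ((size_t)n << qp->sq.wqe_shift));
    }

    int main(void)
    {
        struct toy_qp qp = { .rq = { 0, 5 }, .sq = { 4096, 6 } };

        printf("RQ WQE 3 at +%td, SQ WQE 3 at +%td\n",
               (char *)toy_get_recv_wqe(&qp, 3) - qp.buf,
               (char *)toy_get_send_wqe(&qp, 3) - qp.buf);
        return 0;
    }

With the assumed 32-byte RQ stride and 64-byte SQ stride this prints offsets 96 and 4288.
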
116 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, in mlx5_ib_read_user_wqe() argument
119 struct ib_device *ibdev = qp->ibqp.device; in mlx5_ib_read_user_wqe()
121 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; in mlx5_ib_read_user_wqe()
124 struct ib_umem *umem = qp->umem; in mlx5_ib_read_user_wqe()
131 qp->ibqp.qp_type); in mlx5_ib_read_user_wqe()
170 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) in mlx5_ib_qp_event() argument
172 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx5_ib_qp_event()
176 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; in mlx5_ib_qp_event()
180 event.element.qp = ibqp; in mlx5_ib_qp_event()
207 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); in mlx5_ib_qp_event()
216 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) in set_rq_size() argument
226 qp->rq.max_gs = 0; in set_rq_size()
227 qp->rq.wqe_cnt = 0; in set_rq_size()
228 qp->rq.wqe_shift = 0; in set_rq_size()
231 qp->rq.wqe_cnt = ucmd->rq_wqe_count; in set_rq_size()
232 qp->rq.wqe_shift = ucmd->rq_wqe_shift; in set_rq_size()
233 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; in set_rq_size()
234 qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
236 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0; in set_rq_size()
241 qp->rq.wqe_cnt = wq_size / wqe_size; in set_rq_size()
249 qp->rq.wqe_shift = ilog2(wqe_size); in set_rq_size()
250 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; in set_rq_size()
251 qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
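
set_rq_size() derives the RQ geometry: for kernel QPs the WQE size is an optional signature segment plus the requested number of mlx5_wqe_data_seg entries, rounded up to a power of two, and max_gs is however many data segments fit in that stride minus the signature slot. A self-contained sketch of that rounding, using assumed segment sizes rather than the real struct sizes:

    #include <stdio.h>

    /* Illustrative sizes only; the driver uses sizeof() on its real segments. */
    #define TOY_DATA_SEG_SZ 16u
    #define TOY_SIG_SEG_SZ  16u

    static unsigned roundup_pow2(unsigned v)
    {
        unsigned r = 1;
        while (r < v)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        unsigned max_recv_sge = 3, wq_sig = 1;
        unsigned wqe_size = (wq_sig ? TOY_SIG_SEG_SZ : 0) +
                            max_recv_sge * TOY_DATA_SEG_SZ;

        wqe_size = roundup_pow2(wqe_size);            /* RQ stride must be 2^n */
        unsigned wqe_shift = __builtin_ctz(wqe_size); /* stands in for ilog2() */
        unsigned max_gs = (1u << wqe_shift) / TOY_DATA_SEG_SZ - wq_sig;

        printf("stride=%u shift=%u max_gs=%u\n", wqe_size, wqe_shift, max_gs);
        return 0;
    }

For three SGEs plus a signature segment this gives a 64-byte stride (shift 6) and max_gs of 3, mirroring the max_gs formula on the lines above.
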
325 struct mlx5_ib_qp *qp) in calc_sq_size() argument
344 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) - in calc_sq_size()
346 attr->cap.max_inline_data = qp->max_inline_data; in calc_sq_size()
349 qp->signature_en = true; in calc_sq_size()
352 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; in calc_sq_size()
353 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in calc_sq_size()
355 qp->sq.wqe_cnt, in calc_sq_size()
359 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); in calc_sq_size()
360 qp->sq.max_gs = attr->cap.max_send_sge; in calc_sq_size()
361 qp->sq.max_post = wq_size / wqe_size; in calc_sq_size()
362 attr->cap.max_send_wr = qp->sq.max_post; in calc_sq_size()
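
calc_sq_size() works in 64-byte basic blocks (MLX5_SEND_WQE_BB): per-opcode overhead plus inline data gives a WQE size, the total work-queue size is rounded up, and wqe_cnt is that size divided by the basic block, rejected if it exceeds the firmware's log_max_qp_sz capability. A rough sketch of that division and cap check, with the capability stubbed out and the rounding simplified:

    #include <stdio.h>

    #define TOY_SEND_WQE_BB   64u  /* basic block, as in MLX5_SEND_WQE_BB      */
    #define TOY_LOG_MAX_QP_SZ 16u  /* stand-in for MLX5_CAP_GEN(log_max_qp_sz) */

    int main(void)
    {
        unsigned wqe_size = 192;                    /* overhead + SGEs, BB aligned */
        unsigned max_send_wr = 256;
        unsigned wq_size = wqe_size * max_send_wr;  /* the driver also rounds this */

        unsigned wqe_cnt = wq_size / TOY_SEND_WQE_BB;
        if (wqe_cnt > (1u << TOY_LOG_MAX_QP_SZ)) {
            fprintf(stderr, "send queue too big: %u WQEBBs\n", wqe_cnt);
            return 1;
        }
        printf("wqe_cnt=%u max_post=%u\n", wqe_cnt, wq_size / wqe_size);
        return 0;
    }
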
368 struct mlx5_ib_qp *qp, in set_user_buf_size() argument
371 int desc_sz = 1 << qp->sq.wqe_shift; in set_user_buf_size()
385 qp->sq.wqe_cnt = ucmd->sq_wqe_count; in set_user_buf_size()
387 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in set_user_buf_size()
389 qp->sq.wqe_cnt, in set_user_buf_size()
394 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_buf_size()
395 (qp->sq.wqe_cnt << 6); in set_user_buf_size()
594 struct mlx5_ib_qp *qp, struct ib_udata *udata, in create_user_qp() argument
637 qp->rq.offset = 0; in create_user_qp()
638 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); in create_user_qp()
639 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in create_user_qp()
641 err = set_user_buf_size(dev, qp, &ucmd); in create_user_qp()
645 if (ucmd.buf_addr && qp->buf_size) { in create_user_qp()
646 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, in create_user_qp()
647 qp->buf_size, 0, 0); in create_user_qp()
648 if (IS_ERR(qp->umem)) { in create_user_qp()
650 err = PTR_ERR(qp->umem); in create_user_qp()
654 qp->umem = NULL; in create_user_qp()
657 if (qp->umem) { in create_user_qp()
658 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, in create_user_qp()
666 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); in create_user_qp()
675 if (qp->umem) in create_user_qp()
676 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); in create_user_qp()
683 qp->uuarn = uuarn; in create_user_qp()
685 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); in create_user_qp()
696 qp->create_type = MLX5_QP_USER; in create_user_qp()
701 mlx5_ib_db_unmap_user(context, &qp->db); in create_user_qp()
707 if (qp->umem) in create_user_qp()
708 ib_umem_release(qp->umem); in create_user_qp()
715 static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp) in destroy_qp_user() argument
720 mlx5_ib_db_unmap_user(context, &qp->db); in destroy_qp_user()
721 if (qp->umem) in destroy_qp_user()
722 ib_umem_release(qp->umem); in destroy_qp_user()
723 free_uuar(&context->uuari, qp->uuarn); in destroy_qp_user()
728 struct mlx5_ib_qp *qp, in create_kernel_qp() argument
750 qp->bf = &uuari->bfs[uuarn]; in create_kernel_qp()
751 uar_index = qp->bf->uar->index; in create_kernel_qp()
753 err = calc_sq_size(dev, init_attr, qp); in create_kernel_qp()
759 qp->rq.offset = 0; in create_kernel_qp()
760 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in create_kernel_qp()
761 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); in create_kernel_qp()
763 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf); in create_kernel_qp()
769 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); in create_kernel_qp()
770 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; in create_kernel_qp()
778 cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); in create_kernel_qp()
783 mlx5_fill_page_array(&qp->buf, (*in)->pas); in create_kernel_qp()
785 err = mlx5_db_alloc(dev->mdev, &qp->db); in create_kernel_qp()
791 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL); in create_kernel_qp()
792 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL); in create_kernel_qp()
793 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL); in create_kernel_qp()
794 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL); in create_kernel_qp()
795 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL); in create_kernel_qp()
797 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || in create_kernel_qp()
798 !qp->sq.w_list || !qp->sq.wqe_head) { in create_kernel_qp()
802 qp->create_type = MLX5_QP_KERNEL; in create_kernel_qp()
807 mlx5_db_free(dev->mdev, &qp->db); in create_kernel_qp()
808 kfree(qp->sq.wqe_head); in create_kernel_qp()
809 kfree(qp->sq.w_list); in create_kernel_qp()
810 kfree(qp->sq.wrid); in create_kernel_qp()
811 kfree(qp->sq.wr_data); in create_kernel_qp()
812 kfree(qp->rq.wrid); in create_kernel_qp()
818 mlx5_buf_free(dev->mdev, &qp->buf); in create_kernel_qp()
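
For kernel QPs the single DMA buffer holds the RQ at offset 0 followed by the SQ, so sq.offset equals the RQ size in bytes and buf_size is the sum of the two regions (the err added to the RQ bytes above is calc_sq_size()'s return value, i.e. the SQ size in bytes). A small layout sketch under assumed sizes:

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative geometry: 256 RQ WQEs of 64 bytes, 4 KiB of SQ space. */
        unsigned rq_wqe_cnt = 256, rq_wqe_shift = 6, sq_bytes = 4096;

        unsigned rq_offset = 0;
        unsigned sq_offset = rq_wqe_cnt << rq_wqe_shift;   /* RQ comes first */
        unsigned buf_size  = sq_bytes + (rq_wqe_cnt << rq_wqe_shift);

        printf("rq@%u sq@%u total=%u bytes\n", rq_offset, sq_offset, buf_size);
        return 0;
    }
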
825 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) in destroy_qp_kernel() argument
827 mlx5_db_free(dev->mdev, &qp->db); in destroy_qp_kernel()
828 kfree(qp->sq.wqe_head); in destroy_qp_kernel()
829 kfree(qp->sq.w_list); in destroy_qp_kernel()
830 kfree(qp->sq.wrid); in destroy_qp_kernel()
831 kfree(qp->sq.wr_data); in destroy_qp_kernel()
832 kfree(qp->rq.wrid); in destroy_qp_kernel()
833 mlx5_buf_free(dev->mdev, &qp->buf); in destroy_qp_kernel()
834 free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); in destroy_qp_kernel()
837 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) in get_rx_type() argument
842 else if (!qp->has_rq) in get_rx_type()
858 struct ib_udata *udata, struct mlx5_ib_qp *qp) in create_qp_common() argument
868 mlx5_ib_odp_create_qp(qp); in create_qp_common()
870 mutex_init(&qp->mutex); in create_qp_common()
871 spin_lock_init(&qp->sq.lock); in create_qp_common()
872 spin_lock_init(&qp->rq.lock); in create_qp_common()
879 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; in create_qp_common()
884 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; in create_qp_common()
892 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE); in create_qp_common()
893 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE); in create_qp_common()
895 qp->wq_sig = !!wq_signature; in create_qp_common()
898 qp->has_rq = qp_has_rq(init_attr); in create_qp_common()
899 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, in create_qp_common()
900 qp, (pd && pd->uobject) ? &ucmd : NULL); in create_qp_common()
911 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || in create_qp_common()
912 ucmd.rq_wqe_count != qp->rq.wqe_cnt) { in create_qp_common()
921 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); in create_qp_common()
925 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); in create_qp_common()
937 qp->create_type = MLX5_QP_EMPTY; in create_qp_common()
941 qp->port = init_attr->port_num; in create_qp_common()
951 if (qp->wq_sig) in create_qp_common()
954 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) in create_qp_common()
957 if (qp->scat_cqe && is_connected(init_attr->qp_type)) { in create_qp_common()
977 if (qp->rq.wqe_cnt) { in create_qp_common()
978 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); in create_qp_common()
979 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; in create_qp_common()
982 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr); in create_qp_common()
984 if (qp->sq.wqe_cnt) in create_qp_common()
985 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); in create_qp_common()
1019 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); in create_qp_common()
1021 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen); in create_qp_common()
1032 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
1034 qp->mqp.event = mlx5_ib_qp_event; in create_qp_common()
1039 if (qp->create_type == MLX5_QP_USER) in create_qp_common()
1040 destroy_qp_user(pd, qp); in create_qp_common()
1041 else if (qp->create_type == MLX5_QP_KERNEL) in create_qp_common()
1042 destroy_qp_kernel(dev, qp); in create_qp_common()
1106 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp) in get_pd() argument
1108 return to_mpd(qp->ibqp.pd); in get_pd()
1111 static void get_cqs(struct mlx5_ib_qp *qp, in get_cqs() argument
1114 switch (qp->ibqp.qp_type) { in get_cqs()
1121 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1132 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1133 *recv_cq = to_mcq(qp->ibqp.recv_cq); in get_cqs()
1145 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) in destroy_qp_common() argument
1155 if (qp->state != IB_QPS_RESET) { in destroy_qp_common()
1156 mlx5_ib_qp_disable_pagefaults(qp); in destroy_qp_common()
1157 if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state), in destroy_qp_common()
1158 MLX5_QP_STATE_RST, in, 0, &qp->mqp)) in destroy_qp_common()
1160 qp->mqp.qpn); in destroy_qp_common()
1163 get_cqs(qp, &send_cq, &recv_cq); in destroy_qp_common()
1165 if (qp->create_type == MLX5_QP_KERNEL) { in destroy_qp_common()
1167 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1168 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); in destroy_qp_common()
1170 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1174 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp); in destroy_qp_common()
1176 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); in destroy_qp_common()
1180 if (qp->create_type == MLX5_QP_KERNEL) in destroy_qp_common()
1181 destroy_qp_kernel(dev, qp); in destroy_qp_common()
1182 else if (qp->create_type == MLX5_QP_USER) in destroy_qp_common()
1183 destroy_qp_user(&get_pd(qp)->ibpd, qp); in destroy_qp_common()
1222 struct mlx5_ib_qp *qp; in mlx5_ib_create_qp() local
1259 qp = kzalloc(sizeof(*qp), GFP_KERNEL); in mlx5_ib_create_qp()
1260 if (!qp) in mlx5_ib_create_qp()
1263 err = create_qp_common(dev, pd, init_attr, udata, qp); in mlx5_ib_create_qp()
1266 kfree(qp); in mlx5_ib_create_qp()
1271 qp->ibqp.qp_num = 0; in mlx5_ib_create_qp()
1273 qp->ibqp.qp_num = 1; in mlx5_ib_create_qp()
1275 qp->ibqp.qp_num = qp->mqp.qpn; in mlx5_ib_create_qp()
1278 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn, in mlx5_ib_create_qp()
1281 qp->xrcdn = xrcdn; in mlx5_ib_create_qp()
1296 return &qp->ibqp; in mlx5_ib_create_qp()
1299 int mlx5_ib_destroy_qp(struct ib_qp *qp) in mlx5_ib_destroy_qp() argument
1301 struct mlx5_ib_dev *dev = to_mdev(qp->device); in mlx5_ib_destroy_qp()
1302 struct mlx5_ib_qp *mqp = to_mqp(qp); in mlx5_ib_destroy_qp()
1311 static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, in to_mlx5_access_flags() argument
1321 dest_rd_atomic = qp->resp_depth; in to_mlx5_access_flags()
1326 access_flags = qp->atomic_rd_en; in to_mlx5_access_flags()
1557 struct mlx5_ib_qp *qp = to_mqp(ibqp); in __mlx5_ib_modify_qp() local
1620 context->pri_path.port = qp->port; in __mlx5_ib_modify_qp()
1627 attr_mask & IB_QP_PORT ? attr->port_num : qp->port, in __mlx5_ib_modify_qp()
1643 pd = get_pd(qp); in __mlx5_ib_modify_qp()
1644 get_cqs(qp, &send_cq, &recv_cq); in __mlx5_ib_modify_qp()
1673 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask); in __mlx5_ib_modify_qp()
1684 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) in __mlx5_ib_modify_qp()
1685 context->db_rec_addr = cpu_to_be64(qp->db.dma); in __mlx5_ib_modify_qp()
1710 mlx5_ib_qp_disable_pagefaults(qp); in __mlx5_ib_modify_qp()
1717 &qp->mqp); in __mlx5_ib_modify_qp()
1722 mlx5_ib_qp_enable_pagefaults(qp); in __mlx5_ib_modify_qp()
1724 qp->state = new_state; in __mlx5_ib_modify_qp()
1727 qp->atomic_rd_en = attr->qp_access_flags; in __mlx5_ib_modify_qp()
1729 qp->resp_depth = attr->max_dest_rd_atomic; in __mlx5_ib_modify_qp()
1731 qp->port = attr->port_num; in __mlx5_ib_modify_qp()
1733 qp->alt_port = attr->alt_port_num; in __mlx5_ib_modify_qp()
1740 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx5_ib_modify_qp()
1743 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx5_ib_modify_qp()
1745 qp->rq.head = 0; in __mlx5_ib_modify_qp()
1746 qp->rq.tail = 0; in __mlx5_ib_modify_qp()
1747 qp->sq.head = 0; in __mlx5_ib_modify_qp()
1748 qp->sq.tail = 0; in __mlx5_ib_modify_qp()
1749 qp->sq.cur_post = 0; in __mlx5_ib_modify_qp()
1750 qp->sq.last_poll = 0; in __mlx5_ib_modify_qp()
1751 qp->db.db[MLX5_RCV_DBR] = 0; in __mlx5_ib_modify_qp()
1752 qp->db.db[MLX5_SND_DBR] = 0; in __mlx5_ib_modify_qp()
1764 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_modify_qp() local
1769 mutex_lock(&qp->mutex); in mlx5_ib_modify_qp()
1771 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in mlx5_ib_modify_qp()
1785 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in mlx5_ib_modify_qp()
1809 mutex_unlock(&qp->mutex); in mlx5_ib_modify_qp()
2078 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, in set_data_inl_seg() argument
2082 void *qend = qp->sq.qend; in set_data_inl_seg()
2096 if (unlikely(inl > qp->max_inline_data)) in set_data_inl_seg()
2104 wqe = mlx5_get_send_wqe(qp, 0); in set_data_inl_seg()
2225 struct mlx5_ib_qp *qp, void **seg, int *size) in set_sig_data_segment() argument
2309 if (unlikely((*seg == qp->sq.qend))) in set_sig_data_segment()
2310 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_data_segment()
2319 if (unlikely((*seg == qp->sq.qend))) in set_sig_data_segment()
2320 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_data_segment()
2357 static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, in set_sig_umr_wr() argument
2362 u32 pdn = get_pd(qp)->pdn; in set_sig_umr_wr()
2368 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || in set_sig_umr_wr()
2390 if (unlikely((*seg == qp->sq.qend))) in set_sig_umr_wr()
2391 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_umr_wr()
2396 if (unlikely((*seg == qp->sq.qend))) in set_sig_umr_wr()
2397 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_umr_wr()
2399 ret = set_sig_data_segment(wr, qp, seg, size); in set_sig_umr_wr()
2433 static int set_reg_wr(struct mlx5_ib_qp *qp, in set_reg_wr() argument
2438 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); in set_reg_wr()
2441 mlx5_ib_warn(to_mdev(qp->ibqp.device), in set_reg_wr()
2449 if (unlikely((*seg == qp->sq.qend))) in set_reg_wr()
2450 *seg = mlx5_get_send_wqe(qp, 0); in set_reg_wr()
2455 if (unlikely((*seg == qp->sq.qend))) in set_reg_wr()
2456 *seg = mlx5_get_send_wqe(qp, 0); in set_reg_wr()
2465 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size) in set_linv_wr() argument
2470 if (unlikely((*seg == qp->sq.qend))) in set_linv_wr()
2471 *seg = mlx5_get_send_wqe(qp, 0); in set_linv_wr()
2475 if (unlikely((*seg == qp->sq.qend))) in set_linv_wr()
2476 *seg = mlx5_get_send_wqe(qp, 0); in set_linv_wr()
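
The recurring check against qp->sq.qend in the segment builders above (resetting *seg to mlx5_get_send_wqe(qp, 0)) implements the send queue's ring semantics: as each segment is written the cursor may reach the end of the mapped SQ area, at which point it wraps back to WQE 0. A stand-alone sketch of that cursor wrap over a plain ring buffer, with made-up ring and segment sizes:

    #include <stdio.h>
    #include <string.h>

    #define RING_BYTES 256u
    #define SEG_BYTES   16u

    static char ring[RING_BYTES];

    /* Advance the segment cursor, wrapping to the start of the ring when it
     * hits the end -- the same idea as the qp->sq.qend checks above. */
    static void *advance(void *seg)
    {
        char *p = (char *)seg + SEG_BYTES;

        if (p == ring + RING_BYTES)
            p = ring;
        return p;
    }

    int main(void)
    {
        void *seg = ring + RING_BYTES - SEG_BYTES;   /* last segment slot */

        memset(seg, 0xab, SEG_BYTES);                /* write the segment */
        seg = advance(seg);                          /* wraps to ring[0]  */
        printf("cursor wrapped to offset %td\n", (char *)seg - ring);
        return 0;
    }
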
2479 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) in dump_wqe() argument
2485 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); in dump_wqe()
2488 void *buf = mlx5_get_send_wqe(qp, tidx); in dump_wqe()
2489 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); in dump_wqe()
2500 unsigned bytecnt, struct mlx5_ib_qp *qp) in mlx5_bf_copy() argument
2512 if (unlikely(src == qp->sq.qend)) in mlx5_bf_copy()
2513 src = mlx5_get_send_wqe(qp, 0); in mlx5_bf_copy()
2534 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, in begin_wqe() argument
2541 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { in begin_wqe()
2546 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); in begin_wqe()
2547 *seg = mlx5_get_send_wqe(qp, *idx); in begin_wqe()
2551 (*ctrl)->fm_ce_se = qp->sq_signal_bits | in begin_wqe()
2563 static void finish_wqe(struct mlx5_ib_qp *qp, in finish_wqe() argument
2571 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | in finish_wqe()
2573 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8)); in finish_wqe()
2575 qp->fm_cache = next_fence; in finish_wqe()
2576 if (unlikely(qp->wq_sig)) in finish_wqe()
2579 qp->sq.wrid[idx] = wr_id; in finish_wqe()
2580 qp->sq.w_list[idx].opcode = mlx5_opcode; in finish_wqe()
2581 qp->sq.wqe_head[idx] = qp->sq.head + nreq; in finish_wqe()
2582 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); in finish_wqe()
2583 qp->sq.w_list[idx].next = qp->sq.cur_post; in finish_wqe()
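
finish_wqe() stamps the control segment with the producer index and then advances cur_post by the WQE's size in 64-byte basic blocks; size is counted in 16-byte units, hence DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB), and the bookkeeping slot is always cur_post masked with wqe_cnt - 1. A hedged arithmetic sketch with invented values:

    #include <stdio.h>

    #define TOY_SEND_WQE_BB 64u
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned wqe_cnt = 128;     /* power of two, so & (wqe_cnt - 1) wraps */
        unsigned cur_post = 0;
        unsigned size_16 = 12;      /* WQE size in 16-byte units: 192 bytes   */

        unsigned idx = cur_post & (wqe_cnt - 1);           /* slot for wrid etc. */
        cur_post += DIV_ROUND_UP(size_16 * 16, TOY_SEND_WQE_BB);  /* 3 WQEBBs    */

        printf("posted at slot %u, next cur_post=%u\n", idx, cur_post);
        return 0;
    }
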
2592 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_post_send() local
2596 struct mlx5_bf *bf = qp->bf; in mlx5_ib_post_send()
2598 void *qend = qp->sq.qend; in mlx5_ib_post_send()
2610 spin_lock_irqsave(&qp->sq.lock, flags); in mlx5_ib_post_send()
2620 fence = qp->fm_cache; in mlx5_ib_post_send()
2622 if (unlikely(num_sge > qp->sq.max_gs)) { in mlx5_ib_post_send()
2629 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); in mlx5_ib_post_send()
2664 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; in mlx5_ib_post_send()
2666 set_linv_wr(qp, &seg, &size); in mlx5_ib_post_send()
2672 qp->sq.wr_data[idx] = IB_WR_REG_MR; in mlx5_ib_post_send()
2674 err = set_reg_wr(qp, reg_wr(wr), &seg, &size); in mlx5_ib_post_send()
2683 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; in mlx5_ib_post_send()
2687 err = set_sig_umr_wr(wr, qp, &seg, &size); in mlx5_ib_post_send()
2694 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
2703 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
2721 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
2724 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
2743 finish_wqe(qp, ctrl, size, idx, wr->wr_id, in mlx5_ib_post_send()
2776 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
2785 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; in mlx5_ib_post_send()
2791 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
2796 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
2806 err = set_data_inl_seg(qp, wr, seg, &sz); in mlx5_ib_post_send()
2818 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
2829 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, in mlx5_ib_post_send()
2834 dump_wqe(qp, idx, size); in mlx5_ib_post_send()
2839 qp->sq.head += nreq; in mlx5_ib_post_send()
2846 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); in mlx5_ib_post_send()
2859 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); in mlx5_ib_post_send()
2876 spin_unlock_irqrestore(&qp->sq.lock, flags); in mlx5_ib_post_send()
2889 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_post_recv() local
2898 spin_lock_irqsave(&qp->rq.lock, flags); in mlx5_ib_post_recv()
2900 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx5_ib_post_recv()
2903 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mlx5_ib_post_recv()
2909 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_ib_post_recv()
2915 scat = get_recv_wqe(qp, ind); in mlx5_ib_post_recv()
2916 if (qp->wq_sig) in mlx5_ib_post_recv()
2922 if (i < qp->rq.max_gs) { in mlx5_ib_post_recv()
2928 if (qp->wq_sig) { in mlx5_ib_post_recv()
2930 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); in mlx5_ib_post_recv()
2933 qp->rq.wrid[ind] = wr->wr_id; in mlx5_ib_post_recv()
2935 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx5_ib_post_recv()
2940 qp->rq.head += nreq; in mlx5_ib_post_recv()
2947 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in mlx5_ib_post_recv()
2950 spin_unlock_irqrestore(&qp->rq.lock, flags); in mlx5_ib_post_recv()
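
mlx5_ib_post_recv() walks the posted work requests, writes each scatter list into the WQE at the current ring index, records wr_id, and only after the loop publishes the new head by storing its low 16 bits into the receive doorbell record (the memory barrier that orders the WQE writes before the doorbell sits on a line not matched by this search). A simplified sketch of the index and doorbell bookkeeping, with arbitrary example values:

    #include <stdio.h>

    int main(void)
    {
        unsigned wqe_cnt = 64, head = 62, nreq = 3;
        unsigned ind = head & (wqe_cnt - 1);

        for (unsigned i = 0; i < nreq; i++) {
            /* ... write the scatter list into WQE 'ind', save wr_id ... */
            ind = (ind + 1) & (wqe_cnt - 1);          /* ring wrap-around */
        }

        head += nreq;
        unsigned doorbell = head & 0xffff;  /* low 16 bits go to the db record */
        printf("next ind=%u, doorbell=%u\n", ind, doorbell);
        return 0;
    }
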
3028 struct mlx5_ib_qp *qp = to_mqp(ibqp); in mlx5_ib_query_qp() local
3042 mutex_lock(&qp->mutex); in mlx5_ib_query_qp()
3049 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb)); in mlx5_ib_query_qp()
3055 qp->state = to_ib_qp_state(mlx5_state); in mlx5_ib_query_qp()
3056 qp_attr->qp_state = qp->state; in mlx5_ib_query_qp()
3067 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { in mlx5_ib_query_qp()
3091 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx5_ib_query_qp()
3092 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx5_ib_query_qp()
3095 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; in mlx5_ib_query_qp()
3096 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx5_ib_query_qp()
3110 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) in mlx5_ib_query_qp()
3113 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? in mlx5_ib_query_qp()
3120 mutex_unlock(&qp->mutex); in mlx5_ib_query_qp()