Lines matching refs: seg — cross-reference of the identifier 'seg' in the mlx5_ib send-WQE builders. Each entry shows the source line number, the matching line, and whether 'seg' is an argument or a local in that function.
1992 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, in set_reg_mkey_seg() argument
1998 memset(seg, 0, sizeof(*seg)); in set_reg_mkey_seg()
1999 seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT; in set_reg_mkey_seg()
2000 seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00); in set_reg_mkey_seg()
2001 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); in set_reg_mkey_seg()
2002 seg->start_addr = cpu_to_be64(mr->ibmr.iova); in set_reg_mkey_seg()
2003 seg->len = cpu_to_be64(mr->ibmr.length); in set_reg_mkey_seg()
2004 seg->xlt_oct_size = cpu_to_be32(ndescs); in set_reg_mkey_seg()
2005 seg->log2_page_size = ilog2(mr->ibmr.page_size); in set_reg_mkey_seg()
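The registration helper above packs an MR's parameters into the hardware mkey context: the page size is stored as a log2 (line 2005) and the descriptor count goes into xlt_oct_size (line 2004). Below is a minimal user-space model of that arithmetic, using a simplified stand-in struct and hypothetical values; byte-order conversion (cpu_to_be32/64) is omitted.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for struct mlx5_mkey_seg; only the fields
     * exercised by this model are kept. */
    struct mkey_seg_model {
        uint64_t start_addr;
        uint64_t len;
        uint32_t xlt_oct_size;   /* number of translation descriptors */
        uint8_t  log2_page_size;
    };

    /* Integer log2 of a power-of-two value, mirroring ilog2() on line 2005. */
    static unsigned int log2_u32(uint32_t v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        struct mkey_seg_model seg = {
            .start_addr     = 0x7f0000000000ULL, /* hypothetical iova */
            .len            = 1 << 20,           /* hypothetical MR length */
            .xlt_oct_size   = 256,               /* hypothetical ndescs */
            .log2_page_size = log2_u32(4096),    /* 4 KiB pages -> 12 */
        };

        printf("log2_page_size=%u, %u descriptors, len=%llu\n",
               (unsigned int)seg.log2_page_size, seg.xlt_oct_size,
               (unsigned long long)seg.len);
        return 0;
    }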
2008 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg) in set_linv_mkey_seg() argument
2010 memset(seg, 0, sizeof(*seg)); in set_linv_mkey_seg()
2011 seg->status = MLX5_MKEY_STATUS_FREE; in set_linv_mkey_seg()
2014 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) in set_reg_mkey_segment() argument
2018 memset(seg, 0, sizeof(*seg)); in set_reg_mkey_segment()
2020 seg->status = MLX5_MKEY_STATUS_FREE; in set_reg_mkey_segment()
2024 seg->flags = convert_access(umrwr->access_flags); in set_reg_mkey_segment()
2026 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); in set_reg_mkey_segment()
2027 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr); in set_reg_mkey_segment()
2029 seg->len = cpu_to_be64(umrwr->length); in set_reg_mkey_segment()
2030 seg->log2_page_size = umrwr->page_shift; in set_reg_mkey_segment()
2031 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | in set_reg_mkey_segment()
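Both mkey builders above program only the low byte of the memory key into qpn_mkey7_0, with the remaining bits forced to ones (lines 2000 and 2031). Assuming the usual mlx5 split of an mkey into a 24-bit index plus an 8-bit variant ("key") byte, a small model of that encoding is:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed mkey layout: upper 24 bits are the index, low 8 bits the
     * variant byte that the WQE carries. */
    static uint32_t mkey_to_idx(uint32_t mkey)  { return mkey >> 8; }
    static uint32_t mkey_variant(uint32_t mkey) { return mkey & 0xff; }

    int main(void)
    {
        uint32_t mkey = 0x00abcd42;   /* hypothetical registered key */
        uint32_t qpn_mkey7_0 = 0xffffff00 | mkey_variant(mkey);

        printf("idx=0x%06x variant=0x%02x qpn_mkey7_0=0x%08x\n",
               mkey_to_idx(mkey), mkey_variant(mkey), qpn_mkey7_0);
        return 0;
    }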
2081 struct mlx5_wqe_inline_seg *seg; in set_data_inl_seg() local
2089 seg = wqe; in set_data_inl_seg()
2090 wqe += sizeof(*seg); in set_data_inl_seg()
2110 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); in set_data_inl_seg()
2112 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16; in set_data_inl_seg()
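The inline path stores the payload length with an inline-marker flag OR'ed in (line 2110) and accounts for the WQE growth in 16-byte units (line 2112). A standalone model of that size arithmetic, with the flag value taken as an assumption, is:

    #include <stdint.h>
    #include <stdio.h>

    /* Round x up to a multiple of a (a power of two), mirroring the
     * kernel's ALIGN() used on line 2112. */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    /* Assumed value of the inline-marker bit (MLX5_INLINE_SEG above). */
    #define INLINE_SEG_FLAG 0x80000000u

    int main(void)
    {
        uint32_t inl = 37;   /* hypothetical number of inline payload bytes */
        uint32_t byte_count = inl | INLINE_SEG_FLAG;

        /* WQE size is kept in 16-byte units: payload plus the 4-byte
         * byte_count header, rounded up. */
        uint32_t sz16 = ALIGN_UP(inl + (uint32_t)sizeof(byte_count), 16) / 16;

        printf("byte_count=0x%08x -> %u units (%u bytes)\n",
               byte_count, sz16, sz16 * 16);
        return 0;
    }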
2225 struct mlx5_ib_qp *qp, void **seg, int *size) in set_sig_data_segment() argument
2250 struct mlx5_klm *data_klm = *seg; in set_sig_data_segment()
2278 sblock_ctrl = *seg; in set_sig_data_segment()
2307 *seg += wqe_size; in set_sig_data_segment()
2309 if (unlikely((*seg == qp->sq.qend))) in set_sig_data_segment()
2310 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_data_segment()
2312 bsf = *seg; in set_sig_data_segment()
2317 *seg += sizeof(*bsf); in set_sig_data_segment()
2319 if (unlikely((*seg == qp->sq.qend))) in set_sig_data_segment()
2320 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_data_segment()
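set_sig_data_segment() repeatedly advances the *seg cursor and wraps it back to the first WQE whenever it lands exactly on qp->sq.qend (lines 2307-2320). The same idiom recurs throughout this file; a stripped-down model with illustrative names and sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy send queue: a contiguous buffer whose write cursor wraps to
     * the start when it reaches qend (one past the last byte). */
    struct sq_model {
        uint8_t buf[4 * 64];   /* 4 WQE blocks of 64 bytes */
        void *qend;
    };

    static void *advance(struct sq_model *sq, void *cur, size_t bytes)
    {
        cur = (uint8_t *)cur + bytes;
        if (cur == sq->qend)   /* exact hit on the end: wrap to WQE 0 */
            cur = sq->buf;
        return cur;
    }

    int main(void)
    {
        struct sq_model sq;
        void *seg = sq.buf;

        sq.qend = sq.buf + sizeof(sq.buf);
        seg = advance(&sq, seg, 3 * 64);   /* still inside the queue */
        seg = advance(&sq, seg, 64);       /* lands on qend and wraps */
        printf("wrapped back to start: %s\n",
               seg == (void *)sq.buf ? "yes" : "no");
        return 0;
    }

Note that the check is an exact equality rather than >=, which only works if segment sizes tile the queue exactly; the model above preserves that property.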
2325 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, in set_sig_mkey_segment() argument
2333 memset(seg, 0, sizeof(*seg)); in set_sig_mkey_segment()
2335 seg->flags = get_umr_flags(wr->access_flags) | in set_sig_mkey_segment()
2337 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); in set_sig_mkey_segment()
2338 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | in set_sig_mkey_segment()
2340 seg->len = cpu_to_be64(length); in set_sig_mkey_segment()
2341 seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements))); in set_sig_mkey_segment()
2342 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); in set_sig_mkey_segment()
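The signature mkey segment sizes its translation area via get_klm_octo(nelements) (line 2341), a helper whose body is not part of this listing. Assuming it expresses a KLM array, padded to 64 bytes, as a count of 16-byte octowords, a sketch would be:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    /* Assumed 16-byte KLM entry: byte count, key, virtual address. */
    struct klm_model {
        uint32_t byte_count;
        uint32_t key;
        uint64_t va;
    };

    /* Sketch of get_klm_octo(): n entries, padded to a 64-byte
     * boundary, counted in 16-byte octowords. This is an assumption
     * about the helper, not the driver's definition. */
    static unsigned int klm_octo(unsigned int nelements)
    {
        return ALIGN_UP(nelements * (unsigned int)sizeof(struct klm_model), 64) / 16;
    }

    int main(void)
    {
        printf("2 KLMs -> %u octowords, 5 KLMs -> %u octowords\n",
               klm_octo(2), klm_octo(5));
        return 0;
    }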
2358 void **seg, int *size) in set_sig_umr_wr() argument
2387 set_sig_umr_segment(*seg, klm_oct_size); in set_sig_umr_wr()
2388 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in set_sig_umr_wr()
2390 if (unlikely((*seg == qp->sq.qend))) in set_sig_umr_wr()
2391 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_umr_wr()
2393 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn); in set_sig_umr_wr()
2394 *seg += sizeof(struct mlx5_mkey_seg); in set_sig_umr_wr()
2396 if (unlikely((*seg == qp->sq.qend))) in set_sig_umr_wr()
2397 *seg = mlx5_get_send_wqe(qp, 0); in set_sig_umr_wr()
2399 ret = set_sig_data_segment(wr, qp, seg, size); in set_sig_umr_wr()
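set_sig_umr_wr() strings fixed-size segments together: a UMR control segment, then the signature mkey segment, then the data/BSF area, checking for a queue wrap after each one and growing *size as it goes. A compact model of that chaining and of the 16-byte size accounting, with stand-in segment sizes:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for sizeof(struct mlx5_wqe_umr_ctrl_seg) and
     * sizeof(struct mlx5_mkey_seg); the values are illustrative. */
    #define UMR_CTRL_SEG_BYTES 48
    #define MKEY_SEG_BYTES     64

    struct sq_model {
        uint8_t buf[8 * 64];
        void *qend;
    };

    /* Write one segment at the cursor, advance it, account for it in
     * 16-byte units, and wrap if the cursor reached the end. */
    static void emit_seg(struct sq_model *sq, void **seg, int *size, size_t bytes)
    {
        memset(*seg, 0, bytes);          /* placeholder for the real fill */
        *seg = (uint8_t *)*seg + bytes;
        *size += (int)(bytes / 16);
        if (*seg == sq->qend)
            *seg = sq->buf;
    }

    int main(void)
    {
        struct sq_model sq;
        void *seg = sq.buf;
        int size = 0;

        sq.qend = sq.buf + sizeof(sq.buf);
        emit_seg(&sq, &seg, &size, UMR_CTRL_SEG_BYTES);   /* UMR ctrl */
        emit_seg(&sq, &seg, &size, MKEY_SEG_BYTES);       /* mkey context */
        printf("WQE so far: %d x 16 bytes\n", size);
        return 0;
    }

set_reg_wr() and set_linv_wr() below follow the same shape with their own segment builders.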
2408 u32 psv_idx, void **seg, int *size) in set_psv_wr() argument
2410 struct mlx5_seg_set_psv *psv_seg = *seg; in set_psv_wr()
2427 *seg += sizeof(*psv_seg); in set_psv_wr()
2435 void **seg, int *size) in set_reg_wr() argument
2446 set_reg_umr_seg(*seg, mr); in set_reg_wr()
2447 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in set_reg_wr()
2449 if (unlikely((*seg == qp->sq.qend))) in set_reg_wr()
2450 *seg = mlx5_get_send_wqe(qp, 0); in set_reg_wr()
2452 set_reg_mkey_seg(*seg, mr, wr->key, wr->access); in set_reg_wr()
2453 *seg += sizeof(struct mlx5_mkey_seg); in set_reg_wr()
2455 if (unlikely((*seg == qp->sq.qend))) in set_reg_wr()
2456 *seg = mlx5_get_send_wqe(qp, 0); in set_reg_wr()
2458 set_reg_data_seg(*seg, mr, pd); in set_reg_wr()
2459 *seg += sizeof(struct mlx5_wqe_data_seg); in set_reg_wr()
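set_reg_wr() closes the fast-registration WQE with a single data segment (line 2458) that points at the MR's descriptor buffer; the cursor is advanced by sizeof(struct mlx5_wqe_data_seg) (line 2459). The sketch below uses a simplified stand-in for that segment with the conventional byte_count/lkey/addr triple and hypothetical values, byte-order conversion omitted:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for a scatter/gather data segment: length,
     * local key, and address of the buffer it describes. */
    struct data_seg_model {
        uint32_t byte_count;
        uint32_t lkey;
        uint64_t addr;
    };

    int main(void)
    {
        /* Hypothetical descriptor list: 16 descriptors of 8 bytes each. */
        uint32_t ndescs = 16, desc_size = 8;
        struct data_seg_model dseg = {
            .byte_count = ndescs * desc_size,
            .lkey       = 0x100,           /* hypothetical local key */
            .addr       = 0x1f000000ULL,   /* hypothetical DMA address */
        };

        printf("data seg: %u bytes @ 0x%llx, lkey 0x%x\n",
               dseg.byte_count, (unsigned long long)dseg.addr, dseg.lkey);
        return 0;
    }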
2465 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size) in set_linv_wr() argument
2467 set_linv_umr_seg(*seg); in set_linv_wr()
2468 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in set_linv_wr()
2470 if (unlikely((*seg == qp->sq.qend))) in set_linv_wr()
2471 *seg = mlx5_get_send_wqe(qp, 0); in set_linv_wr()
2472 set_linv_mkey_seg(*seg); in set_linv_wr()
2473 *seg += sizeof(struct mlx5_mkey_seg); in set_linv_wr()
2475 if (unlikely((*seg == qp->sq.qend))) in set_linv_wr()
2476 *seg = mlx5_get_send_wqe(qp, 0); in set_linv_wr()
2534 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, in begin_wqe() argument
2547 *seg = mlx5_get_send_wqe(qp, *idx); in begin_wqe()
2548 *ctrl = *seg; in begin_wqe()
2549 *(uint32_t *)(*seg + 8) = 0; in begin_wqe()
2557 *seg += sizeof(**ctrl); in begin_wqe()
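begin_wqe() resolves the slot for the next WQE, points both *seg and *ctrl at it, clears the dword at offset 8 of the control segment (line 2549), and leaves the cursor just past the control segment (line 2557). The sketch below models that sequence; the index rule (post counter masked by a power-of-two queue depth) and the sizes are assumptions, since those lines are not part of the listing.

    #include <stdint.h>
    #include <stdio.h>

    #define WQE_STRIDE     64   /* bytes per basic WQE block */
    #define WQE_CNT        8    /* assumed power-of-two queue depth */
    #define CTRL_SEG_BYTES 16   /* stand-in for sizeof(**ctrl) */

    struct sq_model {
        _Alignas(64) uint8_t buf[WQE_CNT * WQE_STRIDE];
        unsigned int cur_post;  /* monotonically increasing post counter */
    };

    static void *get_send_wqe(struct sq_model *sq, unsigned int idx)
    {
        return sq->buf + idx * WQE_STRIDE;
    }

    static void begin_wqe_model(struct sq_model *sq, void **seg, void **ctrl,
                                unsigned int *idx)
    {
        *idx = sq->cur_post & (WQE_CNT - 1);     /* assumed index rule */
        *seg = get_send_wqe(sq, *idx);
        *ctrl = *seg;
        *(uint32_t *)((uint8_t *)*seg + 8) = 0;  /* clear 3rd ctrl dword */
        *seg = (uint8_t *)*seg + CTRL_SEG_BYTES; /* cursor past the ctrl seg */
    }

    int main(void)
    {
        struct sq_model sq = { .cur_post = 13 };
        void *seg, *ctrl;
        unsigned int idx;

        begin_wqe_model(&sq, &seg, &ctrl, &idx);
        printf("idx=%u, cursor %td bytes past ctrl\n",
               idx, (uint8_t *)seg - (uint8_t *)ctrl);
        return 0;
    }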
2604 void *seg; in mlx5_ib_post_send() local
2629 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); in mlx5_ib_post_send()
2639 xrc = seg; in mlx5_ib_post_send()
2640 seg += sizeof(*xrc); in mlx5_ib_post_send()
2648 set_raddr_seg(seg, rdma_wr(wr)->remote_addr, in mlx5_ib_post_send()
2650 seg += sizeof(struct mlx5_wqe_raddr_seg); in mlx5_ib_post_send()
2666 set_linv_wr(qp, &seg, &size); in mlx5_ib_post_send()
2674 err = set_reg_wr(qp, reg_wr(wr), &seg, &size); in mlx5_ib_post_send()
2687 err = set_sig_umr_wr(wr, qp, &seg, &size); in mlx5_ib_post_send()
2703 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
2713 mr->sig->psv_memory.psv_idx, &seg, in mlx5_ib_post_send()
2724 err = begin_wqe(qp, &seg, &ctrl, wr, in mlx5_ib_post_send()
2735 mr->sig->psv_wire.psv_idx, &seg, in mlx5_ib_post_send()
2758 set_raddr_seg(seg, rdma_wr(wr)->remote_addr, in mlx5_ib_post_send()
2760 seg += sizeof(struct mlx5_wqe_raddr_seg); in mlx5_ib_post_send()
2772 set_datagram_seg(seg, wr); in mlx5_ib_post_send()
2773 seg += sizeof(struct mlx5_wqe_datagram_seg); in mlx5_ib_post_send()
2775 if (unlikely((seg == qend))) in mlx5_ib_post_send()
2776 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
2787 set_reg_umr_segment(seg, wr); in mlx5_ib_post_send()
2788 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); in mlx5_ib_post_send()
2790 if (unlikely((seg == qend))) in mlx5_ib_post_send()
2791 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
2792 set_reg_mkey_segment(seg, wr); in mlx5_ib_post_send()
2793 seg += sizeof(struct mlx5_mkey_seg); in mlx5_ib_post_send()
2795 if (unlikely((seg == qend))) in mlx5_ib_post_send()
2796 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
2806 err = set_data_inl_seg(qp, wr, seg, &sz); in mlx5_ib_post_send()
2815 dpseg = seg; in mlx5_ib_post_send()
2818 seg = mlx5_get_send_wqe(qp, 0); in mlx5_ib_post_send()
2819 dpseg = seg; in mlx5_ib_post_send()
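Taken together, the mlx5_ib_post_send() lines show the per-work-request shape: begin_wqe() claims a slot, an opcode-specific segment (remote address, datagram, UMR, ...) is appended, and then either an inline segment or one data segment per SGE follows, with the cursor wrapping at qend between steps. The skeleton below models that flow; the opcodes, segment sizes, and placeholder fills are illustrative, not the driver's:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum wr_opcode { WR_SEND, WR_RDMA_WRITE };

    struct wr_model {
        enum wr_opcode opcode;
        int num_sge;
    };

    #define WQE_BYTES  64
    #define WQE_CNT    8
    #define CTRL_SEG   16   /* stand-in sizes for the real segments */
    #define RADDR_SEG  16
    #define DATA_SEG   16

    static _Alignas(64) uint8_t sq_buf[WQE_CNT * WQE_BYTES];
    static void *qend = sq_buf + sizeof(sq_buf);

    static void *wrap(void *seg)
    {
        return seg == qend ? (void *)sq_buf : seg;
    }

    static void post_one(const struct wr_model *wr, unsigned int post)
    {
        void *seg = sq_buf + (post & (WQE_CNT - 1)) * WQE_BYTES;
        int size = 0, i;

        memset(seg, 0, CTRL_SEG);                 /* control segment */
        seg = (uint8_t *)seg + CTRL_SEG;
        size += CTRL_SEG / 16;

        if (wr->opcode == WR_RDMA_WRITE) {        /* remote address segment */
            memset(seg, 0, RADDR_SEG);
            seg = wrap((uint8_t *)seg + RADDR_SEG);
            size += RADDR_SEG / 16;
        }

        for (i = 0; i < wr->num_sge; i++) {       /* one data segment per SGE */
            memset(seg, 0, DATA_SEG);
            seg = wrap((uint8_t *)seg + DATA_SEG);
            size += DATA_SEG / 16;
        }
        printf("slot %u: WQE uses %d x 16 bytes\n", post & (WQE_CNT - 1), size);
    }

    int main(void)
    {
        struct wr_model wr = { .opcode = WR_RDMA_WRITE, .num_sge = 2 };

        post_one(&wr, 13);
        return 0;
    }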