Lines matching refs:sqp (drivers/infiniband/hw/mlx4/qp.c)
627 struct mlx4_ib_sqp *sqp; in create_qp_common() local
678 sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp); in create_qp_common()
679 if (!sqp) in create_qp_common()
681 qp = &sqp->qp; in create_qp_common()
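The entries above (627, 678-681) are where create_qp_common() allocates the special-QP bookkeeping structure and hands the embedded base QP back to the common path. For orientation, here is a minimal sketch of struct mlx4_ib_sqp containing only the fields this listing actually references; the field order, exact types and the MLX4_IB_UD_HEADER_SIZE macro are assumptions, not a copy of the driver header.

	/* Sketch only: fields inferred from the references in this listing.
	 * The embedded struct mlx4_ib_qp is kept first so that
	 * "qp = &sqp->qp" (line 681) hands the base QP back cheaply.
	 */
	struct mlx4_ib_sqp {
		struct mlx4_ib_qp	qp;		/* base QP used by the rest of the driver */
		int			pkey_index;	/* cached by store_sqp_attrs()            */
		u32			qkey;		/* cached by store_sqp_attrs()            */
		u32			send_psn;	/* next PSN stamped into built headers    */
		struct ib_ud_header	ud_header;	/* scratch header for QP0/QP1 sends       */
		u8			header_buf[MLX4_IB_UD_HEADER_SIZE];	/* packed header bytes (assumed macro) */
	};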
1257 static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr, in store_sqp_attrs() argument
1261 sqp->pkey_index = attr->pkey_index; in store_sqp_attrs()
1263 sqp->qkey = attr->qkey; in store_sqp_attrs()
1265 sqp->send_psn = attr->sq_psn; in store_sqp_attrs()
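store_sqp_attrs() (1257-1265) only caches modify-QP attributes for later header builds. The gaps between the referenced lines suggest each store is guarded by an attr_mask test; a minimal sketch under that assumption:

	static void store_sqp_attrs(struct mlx4_ib_sqp *sqp,
				    const struct ib_qp_attr *attr, int attr_mask)
	{
		/* Copy only the attributes the caller actually supplied
		 * (the IB_QP_* mask checks are assumed, not shown in the listing). */
		if (attr_mask & IB_QP_PKEY_INDEX)
			sqp->pkey_index = attr->pkey_index;
		if (attr_mask & IB_QP_QKEY)
			sqp->qkey = attr->qkey;
		if (attr_mask & IB_QP_SQ_PSN)
			sqp->send_psn = attr->sq_psn;
	}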
2033 static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, in build_sriov_qp0_header() argument
2037 struct mlx4_ib_dev *mdev = to_mdev(sqp->qp.ibqp.device); in build_sriov_qp0_header()
2059 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) in build_sriov_qp0_header()
2062 ib_ud_header_init(send_size, 1, 0, 0, 0, 0, &sqp->ud_header); in build_sriov_qp0_header()
2064 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { in build_sriov_qp0_header()
2065 sqp->ud_header.lrh.service_level = in build_sriov_qp0_header()
2067 sqp->ud_header.lrh.destination_lid = in build_sriov_qp0_header()
2069 sqp->ud_header.lrh.source_lid = in build_sriov_qp0_header()
2077 mlx->rlid = sqp->ud_header.lrh.destination_lid; in build_sriov_qp0_header()
2079 sqp->ud_header.lrh.virtual_lane = 0; in build_sriov_qp0_header()
2080 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); in build_sriov_qp0_header()
2081 ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey); in build_sriov_qp0_header()
2082 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); in build_sriov_qp0_header()
2083 if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) in build_sriov_qp0_header()
2084 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); in build_sriov_qp0_header()
2086 sqp->ud_header.bth.destination_qpn = in build_sriov_qp0_header()
2087 cpu_to_be32(mdev->dev->caps.qp0_tunnel[sqp->qp.port - 1]); in build_sriov_qp0_header()
2089 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); in build_sriov_qp0_header()
2091 if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
2094 if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
2097 sqp->ud_header.deth.qkey = cpu_to_be32(qkey); in build_sriov_qp0_header()
2098 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); in build_sriov_qp0_header()
2100 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; in build_sriov_qp0_header()
2101 sqp->ud_header.immediate_present = 0; in build_sriov_qp0_header()
2103 header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); in build_sriov_qp0_header()
2115 memcpy(inl + 1, sqp->header_buf, header_size); in build_sriov_qp0_header()
2119 memcpy(inl + 1, sqp->header_buf, spc); in build_sriov_qp0_header()
2122 memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); in build_sriov_qp0_header()
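The tail of build_sriov_qp0_header() (2103-2122) packs the UD header into sqp->header_buf and then copies it into an inline WQE segment in at most two chunks, so that no chunk crosses a 64-byte boundary; build_mlx_header() ends with the same pattern (2318-2350). A sketch of that copy, assuming the MLX4_INLINE_ALIGN constant, the inl/byte_count inline-segment layout, and the barrier placement:

	header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);

	/* Space left before the next 64-byte boundary after the segment header. */
	spc = MLX4_INLINE_ALIGN -
		((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
	if (header_size <= spc) {
		/* Whole header fits in one inline chunk. */
		inl->byte_count = cpu_to_be32(1U << 31 | header_size);
		memcpy(inl + 1, sqp->header_buf, header_size);
	} else {
		/* Split the header into two chunks at the boundary. */
		inl->byte_count = cpu_to_be32(1U << 31 | spc);
		memcpy(inl + 1, sqp->header_buf, spc);

		inl = (void *) (inl + 1) + spc;
		memcpy(inl + 1, sqp->header_buf + spc, header_size - spc);
		/* The copied data must be visible before byte_count is set,
		 * or the HCA could prefetch the chunk too early. */
		wmb();
		inl->byte_count = cpu_to_be32(1U << 31 | (header_size - spc));
	}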
2156 static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, in build_mlx_header() argument
2159 struct ib_device *ib_dev = sqp->qp.ibqp.device; in build_mlx_header()
2180 is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; in build_mlx_header()
2205 ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header); in build_mlx_header()
2208 sqp->ud_header.lrh.service_level = in build_mlx_header()
2210 sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid; in build_mlx_header()
2211 sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); in build_mlx_header()
2215 sqp->ud_header.grh.traffic_class = in build_mlx_header()
2217 sqp->ud_header.grh.flow_label = in build_mlx_header()
2219 sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; in build_mlx_header()
2221 memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); in build_mlx_header()
2227 sqp->ud_header.grh.source_gid.global.subnet_prefix = in build_mlx_header()
2228 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
2230 sqp->ud_header.grh.source_gid.global.interface_id = in build_mlx_header()
2231 to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1]. in build_mlx_header()
2237 &sqp->ud_header.grh.source_gid); in build_mlx_header()
2239 memcpy(sqp->ud_header.grh.destination_gid.raw, in build_mlx_header()
2246 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
2247 (sqp->ud_header.lrh.destination_lid == in build_mlx_header()
2249 (sqp->ud_header.lrh.service_level << 8)); in build_mlx_header()
2252 mlx->rlid = sqp->ud_header.lrh.destination_lid; in build_mlx_header()
2257 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; in build_mlx_header()
2258 sqp->ud_header.immediate_present = 0; in build_mlx_header()
2261 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; in build_mlx_header()
2262 sqp->ud_header.immediate_present = 1; in build_mlx_header()
2263 sqp->ud_header.immediate_data = wr->ex.imm_data; in build_mlx_header()
2276 memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); in build_mlx_header()
2283 u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]); in build_mlx_header()
2287 memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN); in build_mlx_header()
2290 memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN); in build_mlx_header()
2293 if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) in build_mlx_header()
2296 sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); in build_mlx_header()
2298 sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); in build_mlx_header()
2299 sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); in build_mlx_header()
2302 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; in build_mlx_header()
2303 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) in build_mlx_header()
2304 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; in build_mlx_header()
2306 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); in build_mlx_header()
2307 if (!sqp->qp.ibqp.qp_num) in build_mlx_header()
2308 ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); in build_mlx_header()
2310 ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey); in build_mlx_header()
2311 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); in build_mlx_header()
2312 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); in build_mlx_header()
2313 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); in build_mlx_header()
2314 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? in build_mlx_header()
2315 sqp->qkey : wr->wr.ud.remote_qkey); in build_mlx_header()
2316 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); in build_mlx_header()
2318 header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); in build_mlx_header()
2326 be32_to_cpu(((__be32 *) sqp->header_buf)[i])); in build_mlx_header()
2343 memcpy(inl + 1, sqp->header_buf, header_size); in build_mlx_header()
2347 memcpy(inl + 1, sqp->header_buf, spc); in build_mlx_header()
2350 memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); in build_mlx_header()
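The group of references at 2306-2316 fills the BTH/DETH fields of the QP0/QP1 header. Two details worth calling out: the P_Key index comes from the cached sqp->pkey_index only for QP0 (qp_num == 0) and from the work request otherwise, and a remote qkey with the high bit set selects the QP's own cached qkey, the usual GSI convention. A condensed view of those lines with that reasoning spelled out in comments (the comments are mine, not from the source):

	if (!sqp->qp.ibqp.qp_num)	/* QP0: use the pkey index cached at modify time */
		ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
	else				/* QP1/GSI: the pkey index travels in the WR */
		ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	/* PSN is a 24-bit field, so the running counter is masked. */
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	/* A WR qkey with the high bit set means "use the QP's own qkey". */
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);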