Lines Matching refs:packet (drivers/infiniband/core/user_mad.c)
165 struct ib_umad_packet *packet) in queue_packet() argument
171 for (packet->mad.hdr.id = 0; in queue_packet()
172 packet->mad.hdr.id < IB_UMAD_MAX_AGENTS; in queue_packet()
173 packet->mad.hdr.id++) in queue_packet()
174 if (agent == __get_agent(file, packet->mad.hdr.id)) { in queue_packet()
175 list_add_tail(&packet->list, &file->recv_list); in queue_packet()
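
The matches at lines 165-175 trace queue_packet(): the receive path scans the file's agent slots, stamps the index of the owning agent into the userspace-visible hdr.id field, and appends the packet to the per-file receive list, returning non-zero if the agent is no longer registered. A minimal userspace-style model of that slot scan follows; the type names, the MAX_AGENTS value, and the array-plus-linked-list bookkeeping are illustrative assumptions, not the kernel's definitions.

#include <stdio.h>

#define MAX_AGENTS 32                 /* stand-in for IB_UMAD_MAX_AGENTS */

struct umad_hdr { unsigned int id; }; /* only the field the scan touches */
struct umad_packet {
	struct umad_hdr hdr;
	struct umad_packet *next;     /* stand-in for the kernel list node */
};

struct umad_file {
	void *agent[MAX_AGENTS];      /* registered agents, NULL if slot is free */
	struct umad_packet *recv_head, *recv_tail;
};

/* Return 0 and enqueue if 'agent' is registered on 'file', 1 otherwise. */
static int queue_packet(struct umad_file *file, void *agent,
			struct umad_packet *packet)
{
	for (packet->hdr.id = 0; packet->hdr.id < MAX_AGENTS; packet->hdr.id++)
		if (agent == file->agent[packet->hdr.id]) {
			packet->next = NULL;
			if (file->recv_tail)
				file->recv_tail->next = packet;
			else
				file->recv_head = packet;
			file->recv_tail = packet;
			return 0;
		}
	return 1;                     /* agent was unregistered meanwhile */
}

int main(void)
{
	struct umad_file file = { { NULL } };
	struct umad_packet pkt = { { 0 }, NULL };
	int dummy_agent;

	file.agent[3] = &dummy_agent;
	printf("queued=%d id=%u\n", !queue_packet(&file, &dummy_agent, &pkt),
	       pkt.hdr.id);
	return 0;
}
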
187 struct ib_umad_packet *packet) in dequeue_send() argument
190 list_del(&packet->list); in dequeue_send()
198 struct ib_umad_packet *packet = send_wc->send_buf->context[0]; in send_handler() local
200 dequeue_send(file, packet); in send_handler()
201 ib_destroy_ah(packet->msg->ah); in send_handler()
202 ib_free_send_mad(packet->msg); in send_handler()
205 packet->length = IB_MGMT_MAD_HDR; in send_handler()
206 packet->mad.hdr.status = ETIMEDOUT; in send_handler()
207 if (!queue_packet(file, agent, packet)) in send_handler()
210 kfree(packet); in send_handler()
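
Lines 198-210 show send_handler(): a completed send is dequeued and its address handle and send buffer are released, while a request that timed out without a reply is shrunk to just its MAD header (IB_MGMT_MAD_HDR), stamped with hdr.status = ETIMEDOUT, and queued back onto the same receive list a reply would use, so userspace learns of the timeout through an ordinary read(); the packet is freed immediately only if that re-queue fails. Below is a tiny reader-side sketch of acting on that status field; the struct and helper are illustrative stand-ins, not the real ABI.

#include <errno.h>
#include <stdio.h>
#include <stdint.h>

#define MGMT_MAD_HDR 24   /* stand-in for IB_MGMT_MAD_HDR */

/* Illustrative stand-in for the userspace-visible packet header. */
struct umad_hdr { uint32_t id, status, length; };

/* Interpret one item handed back through the driver's receive queue. */
static const char *classify(const struct umad_hdr *h, size_t data_len)
{
	if (h->status == ETIMEDOUT && data_len == MGMT_MAD_HDR)
		return "request timed out: only its MAD header came back";
	if (h->status == 0)
		return "reply or unsolicited MAD received";
	return "error reported in status field";
}

int main(void)
{
	struct umad_hdr timeout = { .id = 3, .status = ETIMEDOUT };
	struct umad_hdr reply   = { .id = 3, .status = 0 };

	printf("%s\n", classify(&timeout, MGMT_MAD_HDR));
	printf("%s\n", classify(&reply, 256));
	return 0;
}
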
217 struct ib_umad_packet *packet; in recv_handler() local
222 packet = kzalloc(sizeof *packet, GFP_KERNEL); in recv_handler()
223 if (!packet) in recv_handler()
226 packet->length = mad_recv_wc->mad_len; in recv_handler()
227 packet->recv_wc = mad_recv_wc; in recv_handler()
229 packet->mad.hdr.status = 0; in recv_handler()
230 packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len; in recv_handler()
231 packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); in recv_handler()
232 packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid); in recv_handler()
233 packet->mad.hdr.sl = mad_recv_wc->wc->sl; in recv_handler()
234 packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; in recv_handler()
235 packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; in recv_handler()
236 packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); in recv_handler()
237 if (packet->mad.hdr.grh_present) { in recv_handler()
244 packet->mad.hdr.gid_index = ah_attr.grh.sgid_index; in recv_handler()
245 packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit; in recv_handler()
246 packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class; in recv_handler()
247 memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16); in recv_handler()
248 packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label); in recv_handler()
251 if (queue_packet(file, agent, packet)) in recv_handler()
256 kfree(packet); in recv_handler()
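
Lines 217-256 show recv_handler() building the userspace-visible header from the receive work completion and, when a GRH was present, from the resolved address attributes: total length, source QP and LID, SL, path bits, P_Key index, and the GID/hop-limit/traffic-class/flow-label fields. The sketch below models that translation with local stand-in types; the field names mirror the listing, but the wc/GRH struct and the fill helper are assumptions for illustration.

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htonl/htons as stand-ins for cpu_to_be32/16 */

/* Userspace-visible header, modeled on the fields the listing touches. */
struct umad_hdr {
	uint32_t status, length, qpn, flow_label;
	uint16_t lid, pkey_index;
	uint8_t  sl, path_bits, grh_present;
	uint8_t  gid_index, hop_limit, traffic_class, gid[16];
};

/* Stand-in for the bits of the work completion / GRH that get consumed. */
struct recv_info {
	uint32_t src_qp, flow_label, mad_len;
	uint16_t slid, pkey_index;
	uint8_t  sl, dlid_path_bits, with_grh;
	uint8_t  sgid_index, hop_limit, traffic_class, dgid[16];
};

static void fill_hdr(struct umad_hdr *h, const struct recv_info *wc,
		     size_t hdr_size)
{
	memset(h, 0, sizeof(*h));
	h->length      = hdr_size + wc->mad_len; /* total bytes read() returns */
	h->qpn         = htonl(wc->src_qp);      /* big-endian on the wire side, */
	h->lid         = htons(wc->slid);        /* as in the kernel's cpu_to_be*() */
	h->sl          = wc->sl;
	h->path_bits   = wc->dlid_path_bits;
	h->pkey_index  = wc->pkey_index;
	h->grh_present = wc->with_grh;
	if (h->grh_present) {                    /* GRH fields only if present */
		h->gid_index     = wc->sgid_index;
		h->hop_limit     = wc->hop_limit;
		h->traffic_class = wc->traffic_class;
		memcpy(h->gid, wc->dgid, 16);
		h->flow_label    = htonl(wc->flow_label);
	}
}

int main(void)
{
	struct recv_info wc = { .src_qp = 1, .slid = 0x10, .mad_len = 256 };
	struct umad_hdr h;

	fill_hdr(&h, &wc, 64);
	printf("length=%u lid=0x%x\n", h.length, ntohs(h.lid));
	return 0;
}
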
262 struct ib_umad_packet *packet, size_t count) in copy_recv_mad() argument
268 recv_buf = &packet->recv_wc->recv_buf; in copy_recv_mad()
269 if ((packet->length <= sizeof (*recv_buf->mad) && in copy_recv_mad()
270 count < hdr_size(file) + packet->length) || in copy_recv_mad()
271 (packet->length > sizeof (*recv_buf->mad) && in copy_recv_mad()
275 if (copy_to_user(buf, &packet->mad, hdr_size(file))) in copy_recv_mad()
279 seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad)); in copy_recv_mad()
283 if (seg_payload < packet->length) { in copy_recv_mad()
288 if (count < hdr_size(file) + packet->length) { in copy_recv_mad()
298 for (left = packet->length - seg_payload, buf += seg_payload; in copy_recv_mad()
308 return hdr_size(file) + packet->length; in copy_recv_mad()
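
The copy_recv_mad() matches (lines 262-308) encode the read-side contract: the user buffer must hold at least the header plus one MAD; a multi-segment RMPP message is copied out in full only if the buffer covers the whole reassembled length; and a successful read returns hdr_size(file) + packet->length bytes. A small pure function capturing that sizing decision is sketched below, assuming a 256-byte single-MAD size in place of sizeof(struct ib_mad) and treating the -EINVAL/-ENOSPC values as stand-ins for the kernel's returns.

#include <errno.h>
#include <stdio.h>

#define SINGLE_MAD 256   /* stand-in for sizeof(struct ib_mad) */

/*
 * Decide whether a read buffer of 'count' bytes can hold a received MAD of
 * 'length' data bytes preceded by a 'hdr'-byte header.  Returns the number
 * of bytes the read would produce, or a negative errno on a short buffer.
 */
static long recv_copy_size(size_t count, size_t hdr, size_t length)
{
	if (length <= SINGLE_MAD) {
		/* Single-segment MAD: header plus data must fit entirely. */
		if (count < hdr + length)
			return -EINVAL;
	} else {
		/* RMPP: at least the header plus one segment must fit ... */
		if (count < hdr + SINGLE_MAD)
			return -EINVAL;
		/* ... and the full reassembled message to copy the rest. */
		if (count < hdr + length)
			return -ENOSPC;
	}
	return (long)(hdr + length);
}

int main(void)
{
	printf("%ld\n", recv_copy_size(320, 64, 256));   /* fits: 320 */
	printf("%ld\n", recv_copy_size(320, 64, 1024));  /* RMPP too big: -ENOSPC */
	printf("%ld\n", recv_copy_size(128, 64, 256));   /* too small: -EINVAL */
	return 0;
}
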
312 struct ib_umad_packet *packet, size_t count) in copy_send_mad() argument
314 ssize_t size = hdr_size(file) + packet->length; in copy_send_mad()
319 if (copy_to_user(buf, &packet->mad, hdr_size(file))) in copy_send_mad()
324 if (copy_to_user(buf, packet->mad.data, packet->length)) in copy_send_mad()
334 struct ib_umad_packet *packet; in ib_umad_read() local
355 packet = list_entry(file->recv_list.next, struct ib_umad_packet, list); in ib_umad_read()
356 list_del(&packet->list); in ib_umad_read()
360 if (packet->recv_wc) in ib_umad_read()
361 ret = copy_recv_mad(file, buf, packet, count); in ib_umad_read()
363 ret = copy_send_mad(file, buf, packet, count); in ib_umad_read()
368 list_add(&packet->list, &file->recv_list); in ib_umad_read()
371 if (packet->recv_wc) in ib_umad_read()
372 ib_free_recv_mad(packet->recv_wc); in ib_umad_read()
373 kfree(packet); in ib_umad_read()
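
The ib_umad_read() matches (lines 334-373) show the hand-off: the oldest packet is popped off recv_list, copied with copy_recv_mad() or copy_send_mad() depending on whether it carries a receive completion or a returned send, and on a copy error it is pushed back onto the head of the list so the MAD is not lost; only on success are the receive buffers and the packet freed. A minimal model of that pop/restore-on-error pattern follows; the queue type and the fallible consumer are assumptions for illustration.

#include <stdio.h>
#include <stddef.h>

struct packet { int payload; struct packet *next; };

struct queue { struct packet *head, *tail; };

static struct packet *pop_head(struct queue *q)
{
	struct packet *p = q->head;

	if (p) {
		q->head = p->next;
		if (!q->head)
			q->tail = NULL;
	}
	return p;
}

static void push_head(struct queue *q, struct packet *p)
{
	p->next = q->head;
	q->head = p;
	if (!q->tail)
		q->tail = p;
}

/* Deliver the oldest packet; if the consumer fails, restore it unchanged. */
static int deliver_one(struct queue *q, int (*copy_out)(struct packet *))
{
	struct packet *p = pop_head(q);
	int ret;

	if (!p)
		return -1;               /* nothing queued (the kernel would block) */
	ret = copy_out(p);
	if (ret < 0) {
		push_head(q, p);         /* keep the MAD for the next read() */
		return ret;
	}
	/* success: the kernel would free recv_wc and the packet here */
	return ret;
}

static int fail_copy(struct packet *p) { (void)p; return -14; /* -EFAULT */ }

int main(void)
{
	struct packet a = { 1, NULL };
	struct queue q = { &a, &a };

	printf("ret=%d still_queued=%d\n", deliver_one(&q, fail_copy),
	       q.head == &a);
	return 0;
}
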
411 struct ib_umad_packet *packet) in is_duplicate() argument
416 hdr = (struct ib_mad_hdr *) packet->mad.data; in is_duplicate()
436 if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr)) in is_duplicate()
447 struct ib_umad_packet *packet; in ib_umad_write() local
458 packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL); in ib_umad_write()
459 if (!packet) in ib_umad_write()
462 if (copy_from_user(&packet->mad, buf, hdr_size(file))) { in ib_umad_write()
467 if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) { in ib_umad_write()
474 if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) { in ib_umad_write()
481 agent = __get_agent(file, packet->mad.hdr.id); in ib_umad_write()
488 ah_attr.dlid = be16_to_cpu(packet->mad.hdr.lid); in ib_umad_write()
489 ah_attr.sl = packet->mad.hdr.sl; in ib_umad_write()
490 ah_attr.src_path_bits = packet->mad.hdr.path_bits; in ib_umad_write()
492 if (packet->mad.hdr.grh_present) { in ib_umad_write()
494 memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); in ib_umad_write()
495 ah_attr.grh.sgid_index = packet->mad.hdr.gid_index; in ib_umad_write()
496 ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); in ib_umad_write()
497 ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; in ib_umad_write()
498 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; in ib_umad_write()
507 rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data; in ib_umad_write()
521 packet->msg = ib_create_send_mad(agent, in ib_umad_write()
522 be32_to_cpu(packet->mad.hdr.qpn), in ib_umad_write()
523 packet->mad.hdr.pkey_index, rmpp_active, in ib_umad_write()
525 if (IS_ERR(packet->msg)) { in ib_umad_write()
526 ret = PTR_ERR(packet->msg); in ib_umad_write()
530 packet->msg->ah = ah; in ib_umad_write()
531 packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; in ib_umad_write()
532 packet->msg->retries = packet->mad.hdr.retries; in ib_umad_write()
533 packet->msg->context[0] = packet; in ib_umad_write()
536 memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR); in ib_umad_write()
539 if (copy_from_user(packet->msg->mad + copy_offset, in ib_umad_write()
546 ret = copy_rmpp_mad(packet->msg, buf); in ib_umad_write()
556 if (!ib_response_mad(packet->msg->mad)) { in ib_umad_write()
557 tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid; in ib_umad_write()
567 list_add_tail(&packet->list, &file->send_list); in ib_umad_write()
571 ret = is_duplicate(file, packet); in ib_umad_write()
573 list_add_tail(&packet->list, &file->send_list); in ib_umad_write()
581 ret = ib_post_send_mad(packet->msg, NULL); in ib_umad_write()
589 dequeue_send(file, packet); in ib_umad_write()
591 ib_free_send_mad(packet->msg); in ib_umad_write()
597 kfree(packet); in ib_umad_write()
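
The ib_umad_write() matches (lines 447-597) show the send path consuming the same header in the opposite direction: hdr.id selects the registered agent, lid/sl/path_bits plus the optional GRH fields describe the destination address handle, qpn/pkey_index/timeout_ms/retries parameterize the send buffer created with ib_create_send_mad(), and the MAD payload follows the header in the written buffer; requests also get a kernel-assigned TID and a duplicate check before posting. Below is a hedged sketch of how a sender might lay out such a buffer before write(); the struct, its 64-byte size, and the device path are illustrative assumptions rather than the exact ABI, which lives in rdma/ib_user_mad.h.

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Illustrative header mirror; the real ABI is struct ib_user_mad in
 * rdma/ib_user_mad.h and should be used instead of this stand-in. */
struct send_hdr {
	uint32_t id;          /* agent slot obtained at registration time */
	uint32_t status;
	uint32_t timeout_ms;  /* 0 for responses / unsolicited MADs */
	uint32_t retries;
	uint32_t length;
	uint32_t qpn;         /* big-endian: QP0 for SMPs, QP1 for GMPs */
	uint32_t qkey;
	uint16_t lid;         /* big-endian destination LID */
	uint8_t  sl, path_bits, grh_present, gid_index, hop_limit;
	uint8_t  traffic_class, gid[16];
	uint32_t flow_label;
	uint16_t pkey_index;
	uint8_t  reserved[6];
};

int main(void)
{
	uint8_t buf[sizeof(struct send_hdr) + 256];  /* header + one MAD */
	struct send_hdr *h = (struct send_hdr *)buf;

	memset(buf, 0, sizeof(buf));
	h->id         = 3;                 /* hypothetical registered agent */
	h->qpn        = htonl(1);          /* GSI */
	h->lid        = htons(0x10);       /* hypothetical destination LID */
	h->sl         = 0;
	h->timeout_ms = 100;
	h->retries    = 3;
	/* ... fill the 256 MAD bytes following the header here ... */

	printf("would write %zu bytes to /dev/infiniband/umad0\n", sizeof(buf));
	return 0;
}
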
969 struct ib_umad_packet *packet, *tmp; in ib_umad_close() local
979 list_for_each_entry_safe(packet, tmp, &file->recv_list, list) { in ib_umad_close()
980 if (packet->recv_wc) in ib_umad_close()
981 ib_free_recv_mad(packet->recv_wc); in ib_umad_close()
982 kfree(packet); in ib_umad_close()