Lines Matching refs:resp (identifier cross-reference: each entry shows the source line number, the matching line, and the enclosing function; "member" marks a struct field declaration)

118 	struct rdma_ucm_event_resp resp;  member
272 uevent->resp.uid = uevent->mc->uid; in ucma_set_event_context()
273 uevent->resp.id = uevent->mc->id; in ucma_set_event_context()
276 uevent->resp.uid = ctx->uid; in ucma_set_event_context()
277 uevent->resp.id = ctx->id; in ucma_set_event_context()
308 con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { in ucma_removal_event_handler()
334 uevent->resp.event = event->event; in ucma_event_handler()
335 uevent->resp.status = event->status; in ucma_event_handler()
337 ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud); in ucma_event_handler()
339 ucma_copy_conn_event(&uevent->resp.param.conn, in ucma_event_handler()
382 if (out_len < sizeof uevent->resp) in ucma_get_event()
404 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { in ucma_get_event()
413 uevent->resp.id = ctx->id; in ucma_get_event()
417 &uevent->resp, sizeof uevent->resp)) { in ucma_get_event()
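
The hits above cover the event path: ucma_event_handler() fills uevent->resp (event code, status, and either the UD or the connection parameters) and ucma_get_event() later copies it back to the caller. A minimal sketch of that copy-out step, assuming a hypothetical container struct around the real rdma_ucm_event_resp; only the out_len check and the copy_to_user() pattern mirror the listing:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <rdma/rdma_user_cm.h>

/* Hypothetical stand-in for ucma's per-event bookkeeping. */
struct example_uevent {
        struct rdma_ucm_event_resp resp;
};

static ssize_t example_report_event(struct example_uevent *uevent,
                                    u64 response, int out_len)
{
        if (out_len < sizeof(uevent->resp))     /* user buffer too small */
                return -ENOSPC;

        /* copy_to_user() returns the number of bytes it could not copy */
        if (copy_to_user((void __user *)(unsigned long)response,
                         &uevent->resp, sizeof(uevent->resp)))
                return -EFAULT;

        return 0;
}
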
454 struct rdma_ucm_create_id_resp resp; in ucma_create_id() local
459 if (out_len < sizeof(resp)) in ucma_create_id()
483 resp.id = ctx->id; in ucma_create_id()
485 &resp, sizeof(resp))) { in ucma_create_id()
558 if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) in ucma_free_ctx()
572 struct rdma_ucm_destroy_id_resp resp; in ucma_destroy_id() local
576 if (out_len < sizeof(resp)) in ucma_destroy_id()
608 resp.events_reported = ucma_free_ctx(ctx); in ucma_destroy_id()
610 &resp, sizeof(resp))) in ucma_destroy_id()
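
The create_id and destroy_id hits form a pair: the create reply hands the new resp.id back to userspace, and the destroy reply returns resp.events_reported, taken from the context by ucma_free_ctx() after it discards any events still queued for it (an undelivered CONNECT_REQUEST, as in the hit above, also has an embryonic cm_id that must be destroyed). A rough sketch of that teardown, with placeholder structures rather than the real ucma ones:

#include <linux/list.h>
#include <linux/slab.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_user_cm.h>

/* Hypothetical stand-in for ucma's queued-event entries. */
struct example_event {
        struct list_head                list;
        struct rdma_cm_id               *cm_id;
        struct rdma_ucm_event_resp      resp;
};

static int example_free_ctx(struct list_head *pending, int events_reported)
{
        struct example_event *ev, *tmp;

        list_for_each_entry_safe(ev, tmp, pending, list) {
                list_del(&ev->list);
                /* a connect request that was never delivered still owns its cm_id */
                if (ev->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
                        rdma_destroy_id(ev->cm_id);
                kfree(ev);
        }
        /* the count of events already delivered becomes resp.events_reported */
        return events_reported;
}
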
728 static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, in ucma_copy_ib_route() argument
733 resp->num_paths = route->num_paths; in ucma_copy_ib_route()
738 (union ib_gid *) &resp->ib_route[0].dgid); in ucma_copy_ib_route()
740 (union ib_gid *) &resp->ib_route[0].sgid); in ucma_copy_ib_route()
741 resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); in ucma_copy_ib_route()
744 ib_copy_path_rec_to_user(&resp->ib_route[1], in ucma_copy_ib_route()
748 ib_copy_path_rec_to_user(&resp->ib_route[0], in ucma_copy_ib_route()
756 static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp, in ucma_copy_iboe_route() argument
760 resp->num_paths = route->num_paths; in ucma_copy_iboe_route()
764 (union ib_gid *)&resp->ib_route[0].dgid); in ucma_copy_iboe_route()
766 (union ib_gid *)&resp->ib_route[0].sgid); in ucma_copy_iboe_route()
767 resp->ib_route[0].pkey = cpu_to_be16(0xffff); in ucma_copy_iboe_route()
770 ib_copy_path_rec_to_user(&resp->ib_route[1], in ucma_copy_iboe_route()
774 ib_copy_path_rec_to_user(&resp->ib_route[0], in ucma_copy_iboe_route()
782 static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp, in ucma_copy_iw_route() argument
788 rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); in ucma_copy_iw_route()
789 rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); in ucma_copy_iw_route()
797 struct rdma_ucm_query_route_resp resp; in ucma_query_route() local
802 if (out_len < sizeof(resp)) in ucma_query_route()
812 memset(&resp, 0, sizeof resp); in ucma_query_route()
814 memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? in ucma_query_route()
818 memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? in ucma_query_route()
824 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; in ucma_query_route()
825 resp.port_num = ctx->cm_id->port_num; in ucma_query_route()
828 ucma_copy_ib_route(&resp, &ctx->cm_id->route); in ucma_query_route()
830 ucma_copy_iboe_route(&resp, &ctx->cm_id->route); in ucma_query_route()
832 ucma_copy_iw_route(&resp, &ctx->cm_id->route); in ucma_query_route()
836 &resp, sizeof(resp))) in ucma_query_route()
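
ucma_query_route() zeroes resp, copies the source and destination addresses, records the node GUID and port number, and then hands resp to one of the three transport-specific helpers above. The dispatch is roughly the fragment below, where ctx and resp are the locals of ucma_query_route() seen in the listing; the port-capability predicates are the ones used by recent mainline kernels and are an assumption here (older trees switch on the transport type and the port's link layer instead):

/* sketch of the transport dispatch inside ucma_query_route() */
if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
        /* InfiniBand: copy the resolved SA path records */
        ucma_copy_ib_route(&resp, &ctx->cm_id->route);
else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
        /* RoCE/IBoE: GIDs from the resolved addresses, pkey fixed to 0xffff */
        ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
        /* iWARP: only source/destination GIDs mapped from the IP addresses */
        ucma_copy_iw_route(&resp, &ctx->cm_id->route);
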
844 struct rdma_ucm_query_addr_resp *resp) in ucma_query_device_addr() argument
849 resp->node_guid = (__force __u64) cm_id->device->node_guid; in ucma_query_device_addr()
850 resp->port_num = cm_id->port_num; in ucma_query_device_addr()
851 resp->pkey = (__force __u16) cpu_to_be16( in ucma_query_device_addr()
858 struct rdma_ucm_query_addr_resp resp; in ucma_query_addr() local
862 if (out_len < sizeof(resp)) in ucma_query_addr()
865 memset(&resp, 0, sizeof resp); in ucma_query_addr()
868 resp.src_size = rdma_addr_size(addr); in ucma_query_addr()
869 memcpy(&resp.src_addr, addr, resp.src_size); in ucma_query_addr()
872 resp.dst_size = rdma_addr_size(addr); in ucma_query_addr()
873 memcpy(&resp.dst_addr, addr, resp.dst_size); in ucma_query_addr()
875 ucma_query_device_addr(ctx->cm_id, &resp); in ucma_query_addr()
877 if (copy_to_user(response, &resp, sizeof(resp))) in ucma_query_addr()
886 struct rdma_ucm_query_path_resp *resp; in ucma_query_path() local
889 if (out_len < sizeof(*resp)) in ucma_query_path()
892 resp = kzalloc(out_len, GFP_KERNEL); in ucma_query_path()
893 if (!resp) in ucma_query_path()
896 resp->num_paths = ctx->cm_id->route.num_paths; in ucma_query_path()
897 for (i = 0, out_len -= sizeof(*resp); in ucma_query_path()
898 i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data); in ucma_query_path()
901 resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY | in ucma_query_path()
904 &resp->path_data[i].path_rec); in ucma_query_path()
907 if (copy_to_user(response, resp, in ucma_query_path()
908 sizeof(*resp) + (i * sizeof(struct ib_path_rec_data)))) in ucma_query_path()
911 kfree(resp); in ucma_query_path()
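
Unlike the fixed-size replies elsewhere in this listing, ucma_query_path() sizes its reply by the caller's out_len: resp is kzalloc()ed to that length, only as many path entries as fit after the fixed header are filled, and the final copy_to_user() length is sizeof(*resp) plus the entries actually written. A small illustrative helper (not part of ucma.c) for that capacity calculation:

#include <linux/kernel.h>
#include <rdma/ib_user_sa.h>
#include <rdma/rdma_user_cm.h>

/* How many ib_path_rec_data entries fit in a reply buffer of out_len bytes? */
static u32 example_paths_that_fit(size_t out_len, u32 num_paths)
{
        size_t room;

        if (out_len <= sizeof(struct rdma_ucm_query_path_resp))
                return 0;

        room = out_len - sizeof(struct rdma_ucm_query_path_resp);
        return min_t(u32, num_paths, room / sizeof(struct ib_path_rec_data));
}
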
918 struct rdma_ucm_query_addr_resp resp; in ucma_query_gid() local
922 if (out_len < sizeof(resp)) in ucma_query_gid()
925 memset(&resp, 0, sizeof resp); in ucma_query_gid()
927 ucma_query_device_addr(ctx->cm_id, &resp); in ucma_query_gid()
929 addr = (struct sockaddr_ib *) &resp.src_addr; in ucma_query_gid()
930 resp.src_size = sizeof(*addr); in ucma_query_gid()
932 memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size); in ucma_query_gid()
935 addr->sib_pkey = (__force __be16) resp.pkey; in ucma_query_gid()
942 addr = (struct sockaddr_ib *) &resp.dst_addr; in ucma_query_gid()
943 resp.dst_size = sizeof(*addr); in ucma_query_gid()
945 memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); in ucma_query_gid()
948 addr->sib_pkey = (__force __be16) resp.pkey; in ucma_query_gid()
955 if (copy_to_user(response, &resp, sizeof(resp))) in ucma_query_gid()
1130 struct ib_uverbs_qp_attr resp; in ucma_init_qp_attr() local
1135 if (out_len < sizeof(resp)) in ucma_init_qp_attr()
1145 resp.qp_attr_mask = 0; in ucma_init_qp_attr()
1148 ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); in ucma_init_qp_attr()
1152 ib_copy_qp_attr_to_user(&resp, &qp_attr); in ucma_init_qp_attr()
1154 &resp, sizeof(resp))) in ucma_init_qp_attr()
1315 struct rdma_ucm_create_id_resp resp; in ucma_process_join() local
1321 if (out_len < sizeof(resp)) in ucma_process_join()
1345 resp.id = mc->id; in ucma_process_join()
1347 &resp, sizeof(resp))) { in ucma_process_join()
1408 struct rdma_ucm_destroy_id_resp resp; in ucma_leave_multicast() local
1412 if (out_len < sizeof(resp)) in ucma_leave_multicast()
1442 resp.events_reported = mc->events_reported; in ucma_leave_multicast()
1446 &resp, sizeof(resp))) in ucma_leave_multicast()
1489 struct rdma_ucm_migrate_resp resp; in ucma_migrate_id() local
1512 resp.events_reported = ctx->events_reported; in ucma_migrate_id()
1526 resp.events_reported = ctx->events_reported; in ucma_migrate_id()
1533 &resp, sizeof(resp))) in ucma_migrate_id()