Lines matching references to ctx
104 struct ucma_context *ctx; member
114 struct ucma_context *ctx; member
129 struct ucma_context *ctx; in _ucma_find_context() local
131 ctx = idr_find(&ctx_idr, id); in _ucma_find_context()
132 if (!ctx) in _ucma_find_context()
133 ctx = ERR_PTR(-ENOENT); in _ucma_find_context()
134 else if (ctx->file != file) in _ucma_find_context()
135 ctx = ERR_PTR(-EINVAL); in _ucma_find_context()
136 return ctx; in _ucma_find_context()
141 struct ucma_context *ctx; in ucma_get_ctx() local
144 ctx = _ucma_find_context(id, file); in ucma_get_ctx()
145 if (!IS_ERR(ctx)) { in ucma_get_ctx()
146 if (ctx->closing) in ucma_get_ctx()
147 ctx = ERR_PTR(-EIO); in ucma_get_ctx()
149 atomic_inc(&ctx->ref); in ucma_get_ctx()
152 return ctx; in ucma_get_ctx()
155 static void ucma_put_ctx(struct ucma_context *ctx) in ucma_put_ctx() argument
157 if (atomic_dec_and_test(&ctx->ref)) in ucma_put_ctx()
158 complete(&ctx->comp); in ucma_put_ctx()
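
The _ucma_find_context()/ucma_get_ctx()/ucma_put_ctx() fragments above are the lookup and reference-counting core of this file: an IDR maps the user-visible id to a context, a lookup is rejected unless the context belongs to the calling file, ucma_get_ctx() refuses contexts already marked closing (-EIO), and the final ucma_put_ctx() signals a completion so a destroyer can wait for all concurrent users. A minimal kernel-style sketch of that pattern follows; my_ctx, my_idr, my_mut and the my_*() helpers are hypothetical names, not the ucma.c definitions.

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct my_ctx {
	int			id;		/* id handed back to user space */
	void			*file;		/* owning open file; scopes lookups */
	atomic_t		ref;
	struct completion	comp;		/* completed on the last put */
	int			closing;	/* set when a removal worker takes over */
	struct list_head	list;		/* entry on the file's context list */
	struct work_struct	close_work;	/* deferred close, see the next sketch */
};

static DEFINE_IDR(my_idr);
static DEFINE_MUTEX(my_mut);

/* Resolve an id, making sure it belongs to the caller's file. Caller holds my_mut. */
static struct my_ctx *my_find_ctx(int id, void *file)
{
	struct my_ctx *ctx = idr_find(&my_idr, id);

	if (!ctx)
		return ERR_PTR(-ENOENT);
	if (ctx->file != file)
		return ERR_PTR(-EINVAL);
	return ctx;
}

/* Look up and pin a context; fails once the context is marked closing. */
static struct my_ctx *my_get_ctx(int id, void *file)
{
	struct my_ctx *ctx;

	mutex_lock(&my_mut);
	ctx = my_find_ctx(id, file);
	if (!IS_ERR(ctx)) {
		if (ctx->closing)
			ctx = ERR_PTR(-EIO);
		else
			atomic_inc(&ctx->ref);
	}
	mutex_unlock(&my_mut);
	return ctx;
}

/* The final put completes ctx->comp, waking whoever is tearing the context down. */
static void my_put_ctx(struct my_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

Pairing the refcount with a completion, rather than freeing from the put itself, keeps destruction on the destroyer's path: the last user only wakes it.
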
171 struct ucma_context *ctx = container_of(work, struct ucma_context, close_work); in ucma_close_id() local
177 ucma_put_ctx(ctx); in ucma_close_id()
178 wait_for_completion(&ctx->comp); in ucma_close_id()
180 rdma_destroy_id(ctx->cm_id); in ucma_close_id()
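
ucma_close_id() above is the deferred-close half of that scheme: it runs from a workqueue when the underlying device is going away, drops the creation reference, waits until every in-flight user has dropped theirs, and only then destroys the cm_id. A sketch of that work handler, reusing the hypothetical my_ctx/my_put_ctx from the previous block:

/* Deferred close, queued on device removal; reuses my_ctx/my_put_ctx above. */
static void my_close_id(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, close_work);

	/* Drop the creation reference, then wait until every in-flight user
	 * has called my_put_ctx() before anything is destroyed.
	 */
	my_put_ctx(ctx);
	wait_for_completion(&ctx->comp);

	/* ucma_close_id() calls rdma_destroy_id(ctx->cm_id) at this point;
	 * the sketch carries no cm_id, so there is nothing further to do.
	 */
}
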
185 struct ucma_context *ctx; in ucma_alloc_ctx() local
187 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); in ucma_alloc_ctx()
188 if (!ctx) in ucma_alloc_ctx()
191 INIT_WORK(&ctx->close_work, ucma_close_id); in ucma_alloc_ctx()
192 atomic_set(&ctx->ref, 1); in ucma_alloc_ctx()
193 init_completion(&ctx->comp); in ucma_alloc_ctx()
194 INIT_LIST_HEAD(&ctx->mc_list); in ucma_alloc_ctx()
195 ctx->file = file; in ucma_alloc_ctx()
198 ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL); in ucma_alloc_ctx()
200 if (ctx->id < 0) in ucma_alloc_ctx()
203 list_add_tail(&ctx->list, &file->ctx_list); in ucma_alloc_ctx()
204 return ctx; in ucma_alloc_ctx()
207 kfree(ctx); in ucma_alloc_ctx()
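
ucma_alloc_ctx() above initialises everything (the close work item, a reference count of one, the completion, the list heads) before idr_alloc() publishes the id, so a concurrent lookup can never see a half-built context. A hedged sketch of that allocate-then-publish order, continuing the hypothetical my_* declarations (the multicast list and the exact locking of the file list are simplified):

#include <linux/slab.h>

/* Allocate, fully initialise, then publish in the IDR; reuses the my_* sketch. */
static struct my_ctx *my_alloc_ctx(void *file, struct list_head *file_ctx_list)
{
	struct my_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, my_close_id);
	atomic_set(&ctx->ref, 1);		/* creation reference */
	init_completion(&ctx->comp);
	ctx->file = file;

	mutex_lock(&my_mut);
	ctx->id = idr_alloc(&my_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&my_mut);
	if (ctx->id < 0) {
		kfree(ctx);
		return NULL;
	}

	/* ucma.c links the context onto the owning file's context list here. */
	list_add_tail(&ctx->list, file_ctx_list);
	return ctx;
}
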
211 static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) in ucma_alloc_multicast() argument
225 mc->ctx = ctx; in ucma_alloc_multicast()
226 list_add_tail(&mc->list, &ctx->mc_list); in ucma_alloc_multicast()
262 static void ucma_set_event_context(struct ucma_context *ctx, in ucma_set_event_context() argument
266 uevent->ctx = ctx; in ucma_set_event_context()
276 uevent->resp.uid = ctx->uid; in ucma_set_event_context()
277 uevent->resp.id = ctx->id; in ucma_set_event_context()
285 struct ucma_context *ctx = cm_id->context; in ucma_removal_event_handler() local
289 if (ctx->destroying) in ucma_removal_event_handler()
298 if (ctx->cm_id == cm_id) { in ucma_removal_event_handler()
300 ctx->closing = 1; in ucma_removal_event_handler()
302 queue_work(ctx->file->close_wq, &ctx->close_work); in ucma_removal_event_handler()
306 list_for_each_entry(con_req_eve, &ctx->file->event_list, list) { in ucma_removal_event_handler()
311 queue_work(ctx->file->close_wq, &con_req_eve->close_work); in ucma_removal_event_handler()
324 struct ucma_context *ctx = cm_id->context; in ucma_event_handler() local
331 mutex_lock(&ctx->file->mut); in ucma_event_handler()
333 ucma_set_event_context(ctx, event, uevent); in ucma_event_handler()
343 if (!ctx->backlog) { in ucma_event_handler()
348 ctx->backlog--; in ucma_event_handler()
349 } else if (!ctx->uid || ctx->cm_id != cm_id) { in ucma_event_handler()
365 list_add_tail(&uevent->list, &ctx->file->event_list); in ucma_event_handler()
366 wake_up_interruptible(&ctx->file->poll_wait); in ucma_event_handler()
370 mutex_unlock(&ctx->file->mut); in ucma_event_handler()
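
ucma_event_handler() above is the producer side of the per-file event queue: under file->mut it fills in an event (ucma_set_event_context() records the owning context and its uid/id), appends it to file->event_list, and wakes anyone sleeping in poll(); ucma_get_event() later dequeues under the same mutex. A stripped-down sketch of that queue-and-wake idiom; my_file, my_event and my_queue_event() are hypothetical stand-ins, not the ucma.c structures:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct my_file {
	struct mutex		mut;		/* protects event_list */
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct my_event {
	struct list_head	list;
	void			*ctx;		/* context the event belongs to */
	int			type;
};

/* Producer: record an event for a context and wake poll()/read() waiters. */
static int my_queue_event(struct my_file *file, void *ctx, int type)
{
	struct my_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;
	ev->ctx = ctx;
	ev->type = type;

	mutex_lock(&file->mut);
	list_add_tail(&ev->list, &file->event_list);
	wake_up_interruptible(&file->poll_wait);
	mutex_unlock(&file->mut);
	return 0;
}
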
377 struct ucma_context *ctx; in ucma_get_event() local
405 ctx = ucma_alloc_ctx(file); in ucma_get_event()
406 if (!ctx) { in ucma_get_event()
410 uevent->ctx->backlog++; in ucma_get_event()
411 ctx->cm_id = uevent->cm_id; in ucma_get_event()
412 ctx->cm_id->context = ctx; in ucma_get_event()
413 uevent->resp.id = ctx->id; in ucma_get_event()
423 uevent->ctx->events_reported++; in ucma_get_event()
455 struct ucma_context *ctx; in ucma_create_id() local
470 ctx = ucma_alloc_ctx(file); in ucma_create_id()
472 if (!ctx) in ucma_create_id()
475 ctx->uid = cmd.uid; in ucma_create_id()
476 ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, in ucma_create_id()
477 ucma_event_handler, ctx, cmd.ps, qp_type); in ucma_create_id()
478 if (IS_ERR(ctx->cm_id)) { in ucma_create_id()
479 ret = PTR_ERR(ctx->cm_id); in ucma_create_id()
483 resp.id = ctx->id; in ucma_create_id()
492 rdma_destroy_id(ctx->cm_id); in ucma_create_id()
495 idr_remove(&ctx_idr, ctx->id); in ucma_create_id()
497 kfree(ctx); in ucma_create_id()
501 static void ucma_cleanup_multicast(struct ucma_context *ctx) in ucma_cleanup_multicast() argument
506 list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { in ucma_cleanup_multicast()
518 list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) { in ucma_cleanup_mc_events()
538 static int ucma_free_ctx(struct ucma_context *ctx) in ucma_free_ctx() argument
545 ucma_cleanup_multicast(ctx); in ucma_free_ctx()
548 mutex_lock(&ctx->file->mut); in ucma_free_ctx()
549 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { in ucma_free_ctx()
550 if (uevent->ctx == ctx) in ucma_free_ctx()
553 list_del(&ctx->list); in ucma_free_ctx()
554 mutex_unlock(&ctx->file->mut); in ucma_free_ctx()
563 events_reported = ctx->events_reported; in ucma_free_ctx()
564 kfree(ctx); in ucma_free_ctx()
573 struct ucma_context *ctx; in ucma_destroy_id() local
583 ctx = _ucma_find_context(cmd.id, file); in ucma_destroy_id()
584 if (!IS_ERR(ctx)) in ucma_destroy_id()
585 idr_remove(&ctx_idr, ctx->id); in ucma_destroy_id()
588 if (IS_ERR(ctx)) in ucma_destroy_id()
589 return PTR_ERR(ctx); in ucma_destroy_id()
591 mutex_lock(&ctx->file->mut); in ucma_destroy_id()
592 ctx->destroying = 1; in ucma_destroy_id()
593 mutex_unlock(&ctx->file->mut); in ucma_destroy_id()
595 flush_workqueue(ctx->file->close_wq); in ucma_destroy_id()
599 if (!ctx->closing) { in ucma_destroy_id()
601 ucma_put_ctx(ctx); in ucma_destroy_id()
602 wait_for_completion(&ctx->comp); in ucma_destroy_id()
603 rdma_destroy_id(ctx->cm_id); in ucma_destroy_id()
608 resp.events_reported = ucma_free_ctx(ctx); in ucma_destroy_id()
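
ucma_destroy_id() above fixes a strict teardown order: unpublish the id from the IDR so no new lookups succeed, mark the context as being destroyed, flush the per-file close workqueue so a racing device-removal worker has either run or never will, and only if no such worker set ctx->closing drop the creation reference, wait on the completion and destroy the cm_id before freeing. A compressed sketch of that order, reusing my_find_ctx()/my_put_ctx() and friends from the earlier sketches (event cleanup and the destroying flag are elided):

/* Explicit destroy; reuses my_idr, my_mut, my_find_ctx() and my_put_ctx(). */
static int my_destroy_ctx(void *file, int id, struct workqueue_struct *close_wq)
{
	struct my_ctx *ctx;

	/* 1. Unpublish the id: from here on my_get_ctx() returns -ENOENT. */
	mutex_lock(&my_mut);
	ctx = my_find_ctx(id, file);
	if (!IS_ERR(ctx))
		idr_remove(&my_idr, ctx->id);
	mutex_unlock(&my_mut);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* 2. Let any queued close work (device removal) run to completion;
	 *    ucma.c also sets ctx->destroying so new close work backs off.
	 */
	flush_workqueue(close_wq);

	/* 3. If no close worker took over, drop the creation reference, wait
	 *    for all in-flight users, then destroy the id ourselves
	 *    (rdma_destroy_id(ctx->cm_id) in ucma.c).
	 */
	if (!ctx->closing) {
		my_put_ctx(ctx);
		wait_for_completion(&ctx->comp);
	}

	list_del(&ctx->list);	/* ucma_free_ctx() does this under the file mutex */
	kfree(ctx);
	return 0;
}
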
620 struct ucma_context *ctx; in ucma_bind_ip() local
626 ctx = ucma_get_ctx(file, cmd.id); in ucma_bind_ip()
627 if (IS_ERR(ctx)) in ucma_bind_ip()
628 return PTR_ERR(ctx); in ucma_bind_ip()
630 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); in ucma_bind_ip()
631 ucma_put_ctx(ctx); in ucma_bind_ip()
640 struct ucma_context *ctx; in ucma_bind() local
650 ctx = ucma_get_ctx(file, cmd.id); in ucma_bind()
651 if (IS_ERR(ctx)) in ucma_bind()
652 return PTR_ERR(ctx); in ucma_bind()
654 ret = rdma_bind_addr(ctx->cm_id, addr); in ucma_bind()
655 ucma_put_ctx(ctx); in ucma_bind()
664 struct ucma_context *ctx; in ucma_resolve_ip() local
670 ctx = ucma_get_ctx(file, cmd.id); in ucma_resolve_ip()
671 if (IS_ERR(ctx)) in ucma_resolve_ip()
672 return PTR_ERR(ctx); in ucma_resolve_ip()
674 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, in ucma_resolve_ip()
677 ucma_put_ctx(ctx); in ucma_resolve_ip()
687 struct ucma_context *ctx; in ucma_resolve_addr() local
699 ctx = ucma_get_ctx(file, cmd.id); in ucma_resolve_addr()
700 if (IS_ERR(ctx)) in ucma_resolve_addr()
701 return PTR_ERR(ctx); in ucma_resolve_addr()
703 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); in ucma_resolve_addr()
704 ucma_put_ctx(ctx); in ucma_resolve_addr()
713 struct ucma_context *ctx; in ucma_resolve_route() local
719 ctx = ucma_get_ctx(file, cmd.id); in ucma_resolve_route()
720 if (IS_ERR(ctx)) in ucma_resolve_route()
721 return PTR_ERR(ctx); in ucma_resolve_route()
723 ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); in ucma_resolve_route()
724 ucma_put_ctx(ctx); in ucma_resolve_route()
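
From ucma_bind_ip() through ucma_resolve_route() above, every handler has the same shape: copy the command from user space, pin the context with ucma_get_ctx(), call exactly one rdma_cm verb on ctx->cm_id, then ucma_put_ctx() and return the verb's status. A skeleton of that shape, with a hypothetical command struct and the my_* helpers from the first sketch standing in for the real ones:

#include <linux/types.h>
#include <linux/uaccess.h>

struct my_cmd {
	__u32 id;
	/* verb-specific fields would follow */
};

/* Common shape of the ucma write handlers: copy, pin, one verb, unpin. */
static ssize_t my_do_verb(void *file, const char __user *inbuf)
{
	struct my_cmd cmd;
	struct my_ctx *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = my_get_ctx(cmd.id, file);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* One rdma_cm call goes here, e.g. rdma_resolve_route(ctx->cm_id, ...)
	 * or rdma_bind_addr(ctx->cm_id, ...) in the handlers above.
	 */
	ret = 0;

	my_put_ctx(ctx);
	return ret;
}
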
798 struct ucma_context *ctx; in ucma_query_route() local
808 ctx = ucma_get_ctx(file, cmd.id); in ucma_query_route()
809 if (IS_ERR(ctx)) in ucma_query_route()
810 return PTR_ERR(ctx); in ucma_query_route()
813 addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; in ucma_query_route()
817 addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; in ucma_query_route()
821 if (!ctx->cm_id->device) in ucma_query_route()
824 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; in ucma_query_route()
825 resp.port_num = ctx->cm_id->port_num; in ucma_query_route()
827 if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) in ucma_query_route()
828 ucma_copy_ib_route(&resp, &ctx->cm_id->route); in ucma_query_route()
829 else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num)) in ucma_query_route()
830 ucma_copy_iboe_route(&resp, &ctx->cm_id->route); in ucma_query_route()
831 else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) in ucma_query_route()
832 ucma_copy_iw_route(&resp, &ctx->cm_id->route); in ucma_query_route()
839 ucma_put_ctx(ctx); in ucma_query_route()
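
The tail of ucma_query_route() above refuses to report route data until the id is bound to a device, then picks a per-transport copy routine based on that device's capabilities: rdma_cap_ib_sa() selects the IB/SA path-record copy, rdma_protocol_roce() the RoCE variant, and rdma_protocol_iwarp() the iWARP one. A sketch of that capability dispatch; the copy_* callbacks are stand-ins for ucma_copy_ib_route() and friends rather than their real signatures:

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

/* Pick a per-transport copy routine from the port's capabilities. */
static void my_fill_route(struct rdma_cm_id *cm_id,
			  void (*copy_ib)(struct rdma_route *),
			  void (*copy_iboe)(struct rdma_route *),
			  void (*copy_iw)(struct rdma_route *))
{
	if (!cm_id->device)
		return;				/* not bound to a device yet */

	if (rdma_cap_ib_sa(cm_id->device, cm_id->port_num))
		copy_ib(&cm_id->route);		/* IB: SA path records */
	else if (rdma_protocol_roce(cm_id->device, cm_id->port_num))
		copy_iboe(&cm_id->route);	/* RoCE / IBoE */
	else if (rdma_protocol_iwarp(cm_id->device, cm_id->port_num))
		copy_iw(&cm_id->route);		/* iWARP */
}
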
855 static ssize_t ucma_query_addr(struct ucma_context *ctx, in ucma_query_addr() argument
867 addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; in ucma_query_addr()
871 addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; in ucma_query_addr()
875 ucma_query_device_addr(ctx->cm_id, &resp); in ucma_query_addr()
883 static ssize_t ucma_query_path(struct ucma_context *ctx, in ucma_query_path() argument
896 resp->num_paths = ctx->cm_id->route.num_paths; in ucma_query_path()
903 ib_sa_pack_path(&ctx->cm_id->route.path_rec[i], in ucma_query_path()
915 static ssize_t ucma_query_gid(struct ucma_context *ctx, in ucma_query_gid() argument
927 ucma_query_device_addr(ctx->cm_id, &resp); in ucma_query_gid()
931 if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) { in ucma_query_gid()
932 memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size); in ucma_query_gid()
936 rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr, in ucma_query_gid()
938 addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) in ucma_query_gid()
939 &ctx->cm_id->route.addr.src_addr); in ucma_query_gid()
944 if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) { in ucma_query_gid()
945 memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); in ucma_query_gid()
949 rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr, in ucma_query_gid()
951 addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) in ucma_query_gid()
952 &ctx->cm_id->route.addr.dst_addr); in ucma_query_gid()
966 struct ucma_context *ctx; in ucma_query() local
974 ctx = ucma_get_ctx(file, cmd.id); in ucma_query()
975 if (IS_ERR(ctx)) in ucma_query()
976 return PTR_ERR(ctx); in ucma_query()
980 ret = ucma_query_addr(ctx, response, out_len); in ucma_query()
983 ret = ucma_query_path(ctx, response, out_len); in ucma_query()
986 ret = ucma_query_gid(ctx, response, out_len); in ucma_query()
993 ucma_put_ctx(ctx); in ucma_query()
1018 struct ucma_context *ctx; in ucma_connect() local
1027 ctx = ucma_get_ctx(file, cmd.id); in ucma_connect()
1028 if (IS_ERR(ctx)) in ucma_connect()
1029 return PTR_ERR(ctx); in ucma_connect()
1031 ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); in ucma_connect()
1032 ret = rdma_connect(ctx->cm_id, &conn_param); in ucma_connect()
1033 ucma_put_ctx(ctx); in ucma_connect()
1041 struct ucma_context *ctx; in ucma_listen() local
1047 ctx = ucma_get_ctx(file, cmd.id); in ucma_listen()
1048 if (IS_ERR(ctx)) in ucma_listen()
1049 return PTR_ERR(ctx); in ucma_listen()
1051 ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? in ucma_listen()
1053 ret = rdma_listen(ctx->cm_id, ctx->backlog); in ucma_listen()
1054 ucma_put_ctx(ctx); in ucma_listen()
1063 struct ucma_context *ctx; in ucma_accept() local
1069 ctx = ucma_get_ctx(file, cmd.id); in ucma_accept()
1070 if (IS_ERR(ctx)) in ucma_accept()
1071 return PTR_ERR(ctx); in ucma_accept()
1074 ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); in ucma_accept()
1076 ret = rdma_accept(ctx->cm_id, &conn_param); in ucma_accept()
1078 ctx->uid = cmd.uid; in ucma_accept()
1081 ret = rdma_accept(ctx->cm_id, NULL); in ucma_accept()
1083 ucma_put_ctx(ctx); in ucma_accept()
1091 struct ucma_context *ctx; in ucma_reject() local
1097 ctx = ucma_get_ctx(file, cmd.id); in ucma_reject()
1098 if (IS_ERR(ctx)) in ucma_reject()
1099 return PTR_ERR(ctx); in ucma_reject()
1101 ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); in ucma_reject()
1102 ucma_put_ctx(ctx); in ucma_reject()
1110 struct ucma_context *ctx; in ucma_disconnect() local
1116 ctx = ucma_get_ctx(file, cmd.id); in ucma_disconnect()
1117 if (IS_ERR(ctx)) in ucma_disconnect()
1118 return PTR_ERR(ctx); in ucma_disconnect()
1120 ret = rdma_disconnect(ctx->cm_id); in ucma_disconnect()
1121 ucma_put_ctx(ctx); in ucma_disconnect()
1131 struct ucma_context *ctx; in ucma_init_qp_attr() local
1141 ctx = ucma_get_ctx(file, cmd.id); in ucma_init_qp_attr()
1142 if (IS_ERR(ctx)) in ucma_init_qp_attr()
1143 return PTR_ERR(ctx); in ucma_init_qp_attr()
1148 ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); in ucma_init_qp_attr()
1158 ucma_put_ctx(ctx); in ucma_init_qp_attr()
1162 static int ucma_set_option_id(struct ucma_context *ctx, int optname, in ucma_set_option_id() argument
1173 rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); in ucma_set_option_id()
1180 ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); in ucma_set_option_id()
1187 ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0); in ucma_set_option_id()
1196 static int ucma_set_ib_path(struct ucma_context *ctx, in ucma_set_ib_path() argument
1218 ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1); in ucma_set_ib_path()
1224 return ucma_event_handler(ctx->cm_id, &event); in ucma_set_ib_path()
1227 static int ucma_set_option_ib(struct ucma_context *ctx, int optname, in ucma_set_option_ib() argument
1234 ret = ucma_set_ib_path(ctx, optval, optlen); in ucma_set_option_ib()
1243 static int ucma_set_option_level(struct ucma_context *ctx, int level, in ucma_set_option_level() argument
1250 ret = ucma_set_option_id(ctx, optname, optval, optlen); in ucma_set_option_level()
1253 ret = ucma_set_option_ib(ctx, optname, optval, optlen); in ucma_set_option_level()
1266 struct ucma_context *ctx; in ucma_set_option() local
1273 ctx = ucma_get_ctx(file, cmd.id); in ucma_set_option()
1274 if (IS_ERR(ctx)) in ucma_set_option()
1275 return PTR_ERR(ctx); in ucma_set_option()
1284 ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval, in ucma_set_option()
1289 ucma_put_ctx(ctx); in ucma_set_option()
1297 struct ucma_context *ctx; in ucma_notify() local
1303 ctx = ucma_get_ctx(file, cmd.id); in ucma_notify()
1304 if (IS_ERR(ctx)) in ucma_notify()
1305 return PTR_ERR(ctx); in ucma_notify()
1307 ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); in ucma_notify()
1308 ucma_put_ctx(ctx); in ucma_notify()
1316 struct ucma_context *ctx; in ucma_process_join() local
1328 ctx = ucma_get_ctx(file, cmd->id); in ucma_process_join()
1329 if (IS_ERR(ctx)) in ucma_process_join()
1330 return PTR_ERR(ctx); in ucma_process_join()
1333 mc = ucma_alloc_multicast(ctx); in ucma_process_join()
1341 ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc); in ucma_process_join()
1353 ucma_put_ctx(ctx); in ucma_process_join()
1357 rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); in ucma_process_join()
1367 ucma_put_ctx(ctx); in ucma_process_join()
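
ucma_process_join() above shows the unwind pattern for a failed join: the multicast entry is allocated and linked first, rdma_join_multicast() is issued (the three-argument form seen in the fragment), and if a later step such as reporting the id back to user space fails, the join is reverted with rdma_leave_multicast() before the entry is unlinked and freed. A hedged sketch of that unwind; my_mc, my_join() and my_report_to_user() are hypothetical:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <rdma/rdma_cm.h>

struct my_mc {
	struct list_head	list;
	struct sockaddr_storage	addr;
};

/* Stand-in for the copy_to_user() step that reports the new id back. */
static int my_report_to_user(struct my_mc *mc)
{
	return 0;
}

/* Join flow with explicit unwind: allocate and link, join, and if a later
 * step fails leave the group again before unlinking and freeing.
 */
static int my_join(struct rdma_cm_id *cm_id, struct sockaddr *addr,
		   size_t addr_len, struct list_head *mc_list)
{
	struct my_mc *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;
	memcpy(&mc->addr, addr, addr_len);
	list_add_tail(&mc->list, mc_list);

	ret = rdma_join_multicast(cm_id, (struct sockaddr *)&mc->addr, mc);
	if (ret)
		goto err_unlink;

	ret = my_report_to_user(mc);
	if (ret)
		goto err_leave;

	return 0;

err_leave:
	rdma_leave_multicast(cm_id, (struct sockaddr *)&mc->addr);
err_unlink:
	list_del(&mc->list);
	kfree(mc);
	return ret;
}
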
1422 else if (mc->ctx->file != file) in ucma_leave_multicast()
1424 else if (!atomic_inc_not_zero(&mc->ctx->ref)) in ucma_leave_multicast()
1435 rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); in ucma_leave_multicast()
1436 mutex_lock(&mc->ctx->file->mut); in ucma_leave_multicast()
1439 mutex_unlock(&mc->ctx->file->mut); in ucma_leave_multicast()
1441 ucma_put_ctx(mc->ctx); in ucma_leave_multicast()
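
ucma_leave_multicast() above pins the owning context with atomic_inc_not_zero() rather than ucma_get_ctx(): the reference is taken only if the count has not already reached zero, so a context whose final put has run cannot be revived. In terms of the hypothetical my_ctx from the first sketch:

/* Try to pin a context that may be concurrently going away. */
static bool my_try_get_ctx(struct my_ctx *ctx)
{
	/* atomic_inc_not_zero() refuses to take a reference once the count
	 * has dropped to zero, i.e. after the final my_put_ctx() ran.
	 */
	return atomic_inc_not_zero(&ctx->ref);
}
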
1475 static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file) in ucma_move_events() argument
1479 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) in ucma_move_events()
1480 if (uevent->ctx == ctx) in ucma_move_events()
1490 struct ucma_context *ctx; in ucma_migrate_id() local
1504 ctx = ucma_get_ctx(f.file->private_data, cmd.id); in ucma_migrate_id()
1505 if (IS_ERR(ctx)) { in ucma_migrate_id()
1506 ret = PTR_ERR(ctx); in ucma_migrate_id()
1510 cur_file = ctx->file; in ucma_migrate_id()
1512 resp.events_reported = ctx->events_reported; in ucma_migrate_id()
1523 list_move_tail(&ctx->list, &new_file->ctx_list); in ucma_migrate_id()
1524 ucma_move_events(ctx, new_file); in ucma_migrate_id()
1525 ctx->file = new_file; in ucma_migrate_id()
1526 resp.events_reported = ctx->events_reported; in ucma_migrate_id()
1536 ucma_put_ctx(ctx); in ucma_migrate_id()
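
ucma_migrate_id() and ucma_move_events() above re-home a context onto another open file: both files are locked, the context moves to the new file's ctx_list, any of its still-queued events move to the new file's event_list, and ctx->file is updated. A hedged sketch of that move, reusing the hypothetical my_ctx, my_file and my_event types from the earlier sketches:

/* Re-home a context and its queued events onto another open file. */
static void my_migrate_ctx(struct my_ctx *ctx, struct my_file *cur_file,
			   struct my_file *new_file,
			   struct list_head *new_ctx_list)
{
	struct my_event *ev, *tmp;

	/* ucma.c uses a helper that locks the two files in a consistent
	 * order to avoid deadlock; the sketch takes them directly.
	 */
	mutex_lock(&cur_file->mut);
	mutex_lock(&new_file->mut);

	list_move_tail(&ctx->list, new_ctx_list);

	/* Events already queued for this context follow it to the new file. */
	list_for_each_entry_safe(ev, tmp, &cur_file->event_list, list)
		if (ev->ctx == ctx)
			list_move_tail(&ev->list, &new_file->event_list);

	ctx->file = new_file;

	mutex_unlock(&new_file->mut);
	mutex_unlock(&cur_file->mut);
}
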
1651 struct ucma_context *ctx, *tmp; in ucma_close() local
1654 list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) { in ucma_close()
1655 ctx->destroying = 1; in ucma_close()
1659 idr_remove(&ctx_idr, ctx->id); in ucma_close()
1668 if (!ctx->closing) { in ucma_close()
1673 rdma_destroy_id(ctx->cm_id); in ucma_close()
1678 ucma_free_ctx(ctx); in ucma_close()
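
ucma_close() above is the release path for the whole file: every context still on file->ctx_list is marked as being destroyed, removed from the IDR, its cm_id destroyed unless a removal worker already did so, and then freed via ucma_free_ctx(). A condensed sketch of that loop over the hypothetical my_* types (the workqueue flush and event cleanup that ucma_close() also performs are elided):

/* Per-file teardown; reuses my_ctx, my_idr and my_mut from the earlier sketches. */
static void my_close_file(struct list_head *file_ctx_list)
{
	struct my_ctx *ctx, *tmp;

	list_for_each_entry_safe(ctx, tmp, file_ctx_list, list) {
		mutex_lock(&my_mut);
		idr_remove(&my_idr, ctx->id);	/* no lookups can find it now */
		mutex_unlock(&my_mut);

		/* ucma_close() destroys ctx->cm_id here unless a removal
		 * worker (ctx->closing) already did.
		 */

		list_del(&ctx->list);
		kfree(ctx);
	}
}
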