Lines matching refs:req (all references to the identifier req in drivers/infiniband/hw/mlx4/mcg.c, listed by source line and enclosing function)
543 struct mcast_req *req = NULL; in mlx4_ib_mcg_timeout_handler() local
550 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); in mlx4_ib_mcg_timeout_handler()
551 list_del(&req->group_list); in mlx4_ib_mcg_timeout_handler()
552 list_del(&req->func_list); in mlx4_ib_mcg_timeout_handler()
553 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_timeout_handler()
555 kfree(req); in mlx4_ib_mcg_timeout_handler()
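
All of the mlx4_ib_mcg_timeout_handler() lines above (543-555) follow one pattern that repeats throughout this listing: take the oldest entry off the group's pending_list, unlink it from both the group-wide queue and the per-function queue, drop that function's pending counter, and free it. A minimal sketch of that pattern follows, using simplified stand-ins for struct mcast_req and struct mcast_group; the real structures in mcg.c carry more fields, MAX_VFS here is a placeholder, and the locking the handler takes around these steps is omitted.

#include <linux/list.h>
#include <linux/slab.h>
#include <rdma/ib_mad.h>

#define MAX_VFS 64				/* placeholder, not the driver's real limit */

struct ib_sa_mcmember_data {			/* stand-in for the MCMember record layout */
	u8			data[64];
};

struct mcast_req {
	int			func;		/* slave (VF) the request came from */
	int			clean;		/* driver-generated leave, no reply owed */
	struct mcast_group	*group;
	struct ib_sa_mad	sa_mad;		/* private copy of the slave's SA MAD */
	struct list_head	group_list;	/* link in group->pending_list */
	struct list_head	func_list;	/* link in group->func[func].pending */
};

struct mcast_group {
	struct ib_sa_mcmember_data rec;		/* record cached from the SA */
	struct list_head	pending_list;	/* requests not yet processed, in order */
	struct {
		u8		join_state;
		int		num_pend_reqs;
		struct list_head pending;
	} func[MAX_VFS];
};

/* The dequeue-and-free pattern used by the timeout handler lines above. */
static void drop_oldest_pending(struct mcast_group *group)
{
	struct mcast_req *req;

	if (list_empty(&group->pending_list))
		return;

	req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
	list_del(&req->group_list);		/* off the group-wide queue */
	list_del(&req->func_list);		/* off the per-function queue */
	--group->func[req->func].num_pend_reqs;
	kfree(req);
}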
585 struct mcast_req *req) in handle_leave_req() argument
589 if (req->clean) in handle_leave_req()
590 leave_mask = group->func[req->func].join_state; in handle_leave_req()
592 status = check_leave(group, req->func, leave_mask); in handle_leave_req()
594 leave_group(group, req->func, leave_mask); in handle_leave_req()
596 if (!req->clean) in handle_leave_req()
597 send_reply_to_slave(req->func, group, &req->sa_mad, status); in handle_leave_req()
598 --group->func[req->func].num_pend_reqs; in handle_leave_req()
599 list_del(&req->group_list); in handle_leave_req()
600 list_del(&req->func_list); in handle_leave_req()
601 kfree(req); in handle_leave_req()
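
The handle_leave_req() lines (585-601) sketch out how a leave is processed: a clean request (one the driver generated itself when tearing a slave down) leaves with whatever join state the function still holds and gets no reply, while a slave-originated leave is validated with check_leave() and answered with send_reply_to_slave(); either way the request is then unlinked from both lists and freed. A minimal reconstruction under those assumptions, reusing the simplified structs above; the `if (!status)` guard around leave_group() is not part of the listed lines and is assumed, and the helpers are only declared so the sketch stays self-contained.

static u16 check_leave(struct mcast_group *group, int func, u8 leave_mask);
static void leave_group(struct mcast_group *group, int func, u8 leave_mask);
static void send_reply_to_slave(int func, struct mcast_group *group,
				struct ib_sa_mad *sa_mad, u16 status);

static int handle_leave(struct mcast_group *group, u8 leave_mask,
			struct mcast_req *req)
{
	u16 status;

	/* A clean request was generated inside the driver (see
	 * push_deleteing_req further down): drop whatever join state
	 * the function still holds, and skip the reply MAD. */
	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);

	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;				/* one pending request consumed */
}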
606 struct mcast_req *req) in handle_join_req() argument
611 struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; in handle_join_req()
615 status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask); in handle_join_req()
617 join_group(group, req->func, join_mask); in handle_join_req()
619 --group->func[req->func].num_pend_reqs; in handle_join_req()
620 send_reply_to_slave(req->func, group, &req->sa_mad, status); in handle_join_req()
621 list_del(&req->group_list); in handle_join_req()
622 list_del(&req->func_list); in handle_join_req()
623 kfree(req); in handle_join_req()
628 if (send_join_to_wire(group, &req->sa_mad)) { in handle_join_req()
629 --group->func[req->func].num_pend_reqs; in handle_join_req()
630 list_del(&req->group_list); in handle_join_req()
631 list_del(&req->func_list); in handle_join_req()
632 kfree(req); in handle_join_req()
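
The handle_join_req() lines (606-632) show two outcomes for a join: either it is completed locally, with the requested record checked against the group's cached one via cmp_rec() and a reply sent straight back to the slave, or it is forwarded with send_join_to_wire(), of which only the failure branch (629-632) is visible, where the request is simply dropped. The decision between the two is not part of the listed lines; the sketch below assumes it hinges on whether the group's current join state already covers the requested bits, declares the remaining helpers only so it stays readable, and reuses send_reply_to_slave() from the previous sketch. group_join_state() is a hypothetical accessor.

static u16 cmp_rec(struct ib_sa_mcmember_data *cached,
		   struct ib_sa_mcmember_data *requested,
		   ib_sa_comp_mask comp_mask);
static void join_group(struct mcast_group *group, int func, u8 join_mask);
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad);
static u8 group_join_state(struct mcast_group *group);	/* hypothetical accessor */

static int handle_join(struct mcast_group *group, u8 join_mask,
		       struct mcast_req *req)
{
	struct ib_sa_mcmember_data *sa_data =
		(struct ib_sa_mcmember_data *)req->sa_mad.data;
	u16 status;

	if ((group_join_state(group) & join_mask) == join_mask) {
		/* The group already covers this join: validate the requested
		 * record against the cached one and answer the slave directly. */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		return 1;			/* request consumed */
	}

	/* Otherwise forward the join to the subnet administrator; on a send
	 * failure the request is dropped just like a local completion. */
	if (send_join_to_wire(group, &req->sa_mad)) {
		--group->func[req->func].num_pend_reqs;
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		return 1;
	}

	return 0;				/* the SA's reply will complete it later */
}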
645 struct mcast_req *req = NULL; in mlx4_ib_mcg_work_handler() local
674 req = list_first_entry(&group->pending_list, in mlx4_ib_mcg_work_handler()
677 if (req) { in mlx4_ib_mcg_work_handler()
678 send_reply_to_slave(req->func, group, &req->sa_mad, status); in mlx4_ib_mcg_work_handler()
679 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_work_handler()
680 list_del(&req->group_list); in mlx4_ib_mcg_work_handler()
681 list_del(&req->func_list); in mlx4_ib_mcg_work_handler()
682 kfree(req); in mlx4_ib_mcg_work_handler()
710 req = list_first_entry(&group->pending_list, struct mcast_req, in mlx4_ib_mcg_work_handler()
712 sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; in mlx4_ib_mcg_work_handler()
718 if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE) in mlx4_ib_mcg_work_handler()
719 rc += handle_leave_req(group, req_join_state, req); in mlx4_ib_mcg_work_handler()
721 rc += handle_join_req(group, req_join_state, req); in mlx4_ib_mcg_work_handler()
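
Lines 710-721 show how the work handler dispatches the request at the head of the pending queue: an IB_SA_METHOD_DELETE MAD is treated as a leave, anything else as a join, and the return values are accumulated into rc. A sketch of that dispatch, using the handle_leave()/handle_join() sketches above in place of handle_leave_req()/handle_join_req(); requested_join_state() is a hypothetical accessor for the join-state byte of the MCMember record, since the listed lines do not show how it is extracted.

static u8 requested_join_state(struct ib_sa_mcmember_data *sa_data);	/* hypothetical */

static int process_next_pending(struct mcast_group *group)
{
	struct ib_sa_mcmember_data *sa_data;
	struct mcast_req *req;
	u8 req_join_state;
	int rc = 0;

	if (list_empty(&group->pending_list))
		return 0;

	req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
	sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
	req_join_state = requested_join_state(sa_data);

	if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
		rc += handle_leave(group, req_join_state, req);
	else
		rc += handle_join(group, req_join_state, req);

	return rc;
}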
751 struct mcast_req *req; in search_relocate_mgid0_group() local
769 req = list_first_entry(&group->pending_list, in search_relocate_mgid0_group()
771 --group->func[req->func].num_pend_reqs; in search_relocate_mgid0_group()
772 list_del(&req->group_list); in search_relocate_mgid0_group()
773 list_del(&req->func_list); in search_relocate_mgid0_group()
774 kfree(req); in search_relocate_mgid0_group()
873 static void queue_req(struct mcast_req *req) in queue_req() argument
875 struct mcast_group *group = req->group; in queue_req()
879 list_add_tail(&req->group_list, &group->pending_list); in queue_req()
880 list_add_tail(&req->func_list, &group->func[req->func].pending); in queue_req()
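
queue_req() (873-880) is where a request gets linked onto both queues at once: the group-wide pending_list that fixes processing order, and the per-function pending list that later lets one slave's requests be found and cancelled (see the clear_pending_reqs() lines below). A simplified version follows; the real function also takes group references and schedules the work queue, which are omitted here, and whether num_pend_reqs is bumped here or by the callers is not visible in these lines.

static void queue_req_sketch(struct mcast_req *req)
{
	struct mcast_group *group = req->group;

	++group->func[req->func].num_pend_reqs;	/* paired with the decrements above */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
}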
942 struct mcast_req *req; in mlx4_ib_mcg_multiplex_handler() local
952 req = kzalloc(sizeof *req, GFP_KERNEL); in mlx4_ib_mcg_multiplex_handler()
953 if (!req) in mlx4_ib_mcg_multiplex_handler()
956 req->func = slave; in mlx4_ib_mcg_multiplex_handler()
957 req->sa_mad = *sa_mad; in mlx4_ib_mcg_multiplex_handler()
963 kfree(req); in mlx4_ib_mcg_multiplex_handler()
972 kfree(req); in mlx4_ib_mcg_multiplex_handler()
976 req->group = group; in mlx4_ib_mcg_multiplex_handler()
977 queue_req(req); in mlx4_ib_mcg_multiplex_handler()
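
The mlx4_ib_mcg_multiplex_handler() lines (942-977) show where a request starts life: allocate a struct mcast_req, record the originating slave, copy the MAD, then either attach it to its group and queue it, or free it if anything in between fails (the kfree() calls at 963 and 972 are error paths whose conditions are not shown). A sketch under those assumptions; find_or_create_group() is a hypothetical stand-in for the group lookup done between the copy and queue_req().

static struct mcast_group *find_or_create_group(void *ctx, struct ib_sa_mad *sa_mad);

static int multiplex_sketch(void *ctx, int slave, struct ib_sa_mad *sa_mad)
{
	struct mcast_group *group;
	struct mcast_req *req;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->func = slave;			/* remember which VF this MAD came from */
	req->sa_mad = *sa_mad;			/* keep a private copy of the MAD */

	group = find_or_create_group(ctx, sa_mad);
	if (!group) {
		kfree(req);			/* lookup failed: drop the request */
		return -ENOMEM;
	}

	req->group = group;
	queue_req_sketch(req);			/* two-list queueing from the previous sketch */
	return 0;
}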
998 struct mcast_req *req = NULL; in sysfs_show_group() local
1013 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); in sysfs_show_group()
1015 be64_to_cpu(req->sa_mad.mad_hdr.tid)); in sysfs_show_group()
1065 struct mcast_req *req, *tmp; in force_clean_group() local
1067 list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) { in force_clean_group()
1068 list_del(&req->group_list); in force_clean_group()
1069 kfree(req); in force_clean_group()
1158 static void build_leave_mad(struct mcast_req *req) in build_leave_mad() argument
1160 struct ib_sa_mad *mad = &req->sa_mad; in build_leave_mad()
1168 struct mcast_req *req, *tmp, *group_first = NULL; in clear_pending_reqs() local
1175 list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) { in clear_pending_reqs()
1177 if (group_first == req && in clear_pending_reqs()
1186 list_del(&req->group_list); in clear_pending_reqs()
1187 list_del(&req->func_list); in clear_pending_reqs()
1188 kfree(req); in clear_pending_reqs()
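
clear_pending_reqs() (1168-1188) walks one function's pending list with list_for_each_entry_safe() and throws the entries away, with a special case at line 1177 for a request that is also at the head of the group-wide queue, since that one may already have been sent to the wire. In the sketch below that special case is reduced to skipping the entry; the real handling is not visible in the listed lines.

static void clear_vf_pending_sketch(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp;
	struct mcast_req *group_first =
		list_first_entry_or_null(&group->pending_list,
					 struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		if (group_first == req)
			continue;		/* head of the group queue: may be in flight */

		--group->func[vf].num_pend_reqs;
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
	}
}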
1201 struct mcast_req *req; in push_deleteing_req() local
1207 req = kzalloc(sizeof *req, GFP_KERNEL); in push_deleteing_req()
1208 if (!req) { in push_deleteing_req()
1216 kfree(req); in push_deleteing_req()
1221 req->clean = 1; in push_deleteing_req()
1222 req->func = slave; in push_deleteing_req()
1223 req->group = group; in push_deleteing_req()
1225 build_leave_mad(req); in push_deleteing_req()
1226 queue_req(req); in push_deleteing_req()
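
Finally, build_leave_mad() (1158-1160) and push_deleteing_req() (1201-1226) show how the driver fabricates a leave on behalf of a slave that is being cleaned up: a fresh request marked clean is pointed at the group and queued like any slave request, so the leave handling later strips the function's remaining join state without sending a reply. In the sketch below, filling the MAD method with IB_SA_METHOD_DELETE is an assumption consistent with the DELETE dispatch in the work-handler lines; only the `mad = &req->sa_mad` line of build_leave_mad() is actually shown, and the error path that frees the request at line 1216 is omitted.

static void build_leave_mad_sketch(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	/* req came from kzalloc(), so the rest of the MAD is already zero;
	 * the real build_leave_mad() fills in more header fields than this. */
	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}

static int push_cleanup_leave_sketch(struct mcast_group *group, int slave)
{
	struct mcast_req *req;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->clean = 1;				/* internal request: no reply to the slave */
	req->func = slave;
	req->group = group;

	build_leave_mad_sketch(req);
	queue_req_sketch(req);			/* same two-list queueing as before */
	return 0;
}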