Lines Matching refs:group

50 #define mcg_warn_group(group, format, arg...) \ argument
52 (group)->name, group->demux->port, ## arg)
54 #define mcg_error_group(group, format, arg...) \ argument
55 pr_err(" %16s: " format, (group)->name, ## arg)
132 struct mcast_group *group; member
140 mcg_warn_group(group, "did not expect to reach zero\n"); \
162 struct mcast_group *group; in mcast_find() local
166 group = rb_entry(node, struct mcast_group, node); in mcast_find()
167 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); in mcast_find()
169 return group; in mcast_find()
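
mcast_find() above is the standard Linux rb-tree lookup idiom, keyed by a memcmp() over the 16-byte MGID. A minimal sketch of that lookup, using illustrative sketch_* names rather than the driver's own types:

    #include <linux/rbtree.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct sketch_group {                       /* hypothetical stand-in for mcast_group */
            struct rb_node node;
            u8 mgid[16];                        /* the 16-byte MGID is the lookup key */
    };

    static struct sketch_group *sketch_find(struct rb_root *root, const u8 *mgid)
    {
            struct rb_node *n = root->rb_node;

            while (n) {
                    struct sketch_group *g = rb_entry(n, struct sketch_group, node);
                    int ret = memcmp(mgid, g->mgid, sizeof(g->mgid));

                    if (!ret)
                            return g;           /* exact MGID match */
                    n = ret < 0 ? n->rb_left : n->rb_right;
            }
            return NULL;                        /* not in the tree */
    }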
180 struct mcast_group *group) in mcast_insert() argument
191 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, in mcast_insert()
192 sizeof group->rec.mgid); in mcast_insert()
200 rb_link_node(&group->node, parent, link); in mcast_insert()
201 rb_insert_color(&group->node, &ctx->mcg_table); in mcast_insert()
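
mcast_insert() is the matching insert half of that idiom: walk down from the root while remembering the parent and the link slot, bail out with the existing node on a duplicate MGID, otherwise link the new node and rebalance. A sketch continuing the illustrative types above:

    static struct sketch_group *sketch_insert(struct rb_root *root,
                                              struct sketch_group *grp)
    {
            struct rb_node **link = &root->rb_node, *parent = NULL;

            while (*link) {
                    struct sketch_group *cur;
                    int ret;

                    parent = *link;
                    cur = rb_entry(parent, struct sketch_group, node);
                    ret = memcmp(grp->mgid, cur->mgid, sizeof(grp->mgid));
                    if (!ret)
                            return cur;         /* duplicate: caller keeps the existing group */
                    link = ret < 0 ? &(*link)->rb_left : &(*link)->rb_right;
            }
            rb_link_node(&grp->node, parent, link);
            rb_insert_color(&grp->node, root);
            return NULL;                        /* inserted */
    }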
248 static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad) in send_join_to_wire() argument
258 sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0]; in send_join_to_wire()
261 mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); in send_join_to_wire()
262 group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ in send_join_to_wire()
264 ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); in send_join_to_wire()
268 queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, in send_join_to_wire()
275 static int send_leave_to_wire(struct mcast_group *group, u8 join_state) in send_leave_to_wire() argument
288 mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux); in send_leave_to_wire()
289 group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */ in send_leave_to_wire()
297 *sa_data = group->rec; in send_leave_to_wire()
300 ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad); in send_leave_to_wire()
302 group->state = MCAST_IDLE; in send_leave_to_wire()
307 queue_delayed_work(group->demux->mcg_wq, &group->timeout_work, in send_leave_to_wire()
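
Both send_join_to_wire() and send_leave_to_wire() follow the same request pattern: take a fresh transaction ID from the demux context, remember it in group->last_req_tid so the SA response can be validated later, send the MAD, and arm timeout_work in case no response ever arrives. A minimal sketch of that scheduling side, assuming a hypothetical context struct (the actual timeout value is not visible in the lines above):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    struct sketch_req_ctx {                     /* hypothetical request context */
            struct workqueue_struct *wq;
            struct delayed_work timeout_work;
            u64 last_req_tid;
    };

    static void sketch_send_request(struct sketch_req_ctx *ctx, u64 tid,
                                    unsigned long timeout_ms)
    {
            ctx->last_req_tid = tid;            /* keep it for later validation */
            /* ... build the SA MAD and send it to the wire here ... */
            queue_delayed_work(ctx->wq, &ctx->timeout_work,
                               msecs_to_jiffies(timeout_ms));
    }

    static void sketch_timeout(struct work_struct *work)
    {
            struct sketch_req_ctx *ctx =
                    container_of(to_delayed_work(work), struct sketch_req_ctx,
                                 timeout_work);
            /* no response matched ctx->last_req_tid in time: roll the state back */
            (void)ctx;
    }

For sketch_timeout() to run, INIT_DELAYED_WORK(&ctx->timeout_work, sketch_timeout) must have been done at setup time, just as acquire_group() does for timeout_work further down the listing.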
314 static int send_reply_to_slave(int slave, struct mcast_group *group, in send_reply_to_slave() argument
337 *sa_data = group->rec; in send_reply_to_slave()
341 sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f); in send_reply_to_slave()
344 ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad); in send_reply_to_slave()
433 static int release_group(struct mcast_group *group, int from_timeout_handler) in release_group() argument
435 struct mlx4_ib_demux_ctx *ctx = group->demux; in release_group()
439 mutex_lock(&group->lock); in release_group()
440 if (atomic_dec_and_test(&group->refcount)) { in release_group()
442 if (group->state != MCAST_IDLE && in release_group()
443 !cancel_delayed_work(&group->timeout_work)) { in release_group()
444 atomic_inc(&group->refcount); in release_group()
445 mutex_unlock(&group->lock); in release_group()
451 nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0); in release_group()
453 del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in release_group()
454 if (!list_empty(&group->pending_list)) in release_group()
455 mcg_warn_group(group, "releasing a group with non empty pending list\n"); in release_group()
457 rb_erase(&group->node, &ctx->mcg_table); in release_group()
458 list_del_init(&group->mgid0_list); in release_group()
459 mutex_unlock(&group->lock); in release_group()
461 kfree(group); in release_group()
464 mutex_unlock(&group->lock); in release_group()
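
release_group() illustrates how the last reference is dropped while a response timeout may still be in flight: the count is decremented under group->lock, and if the delayed work can no longer be cancelled (its handler has already started), the reference is put back and teardown is left to the timeout path. A reduced sketch of just that decision, with the rb-tree and sysfs teardown elided to comments:

    #include <linux/atomic.h>
    #include <linux/mutex.h>
    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct sketch_refcounted {                  /* hypothetical, only the fields used here */
            atomic_t refcount;
            struct mutex lock;
            bool idle;                          /* no request outstanding on the wire */
            struct delayed_work timeout_work;
    };

    static void sketch_release(struct sketch_refcounted *g)
    {
            mutex_lock(&g->lock);
            if (atomic_dec_and_test(&g->refcount)) {
                    if (!g->idle && !cancel_delayed_work(&g->timeout_work)) {
                            /* the timeout handler already started and will release
                             * the object itself: restore the reference and back off */
                            atomic_inc(&g->refcount);
                            mutex_unlock(&g->lock);
                            return;
                    }
                    /* ... unlink from the rb-tree and sysfs while still locked ... */
                    mutex_unlock(&g->lock);
                    kfree(g);
                    return;
            }
            mutex_unlock(&g->lock);
    }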
470 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) in adjust_membership() argument
476 group->members[i] += inc; in adjust_membership()
479 static u8 get_leave_state(struct mcast_group *group) in get_leave_state() argument
485 if (!group->members[i]) in get_leave_state()
488 return leave_state & (group->rec.scope_join_state & 7); in get_leave_state()
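
adjust_membership() and get_leave_state() keep one counter per join-state bit: scope_join_state packs the multicast scope in its high nibble and the join state in its low nibble, and the masks in the listing (& 7, & 0xf, & 0xf8) show the driver tracking the low three join bits with one counter each. A leave is only sent for bits that no function holds any more and that the SA-visible group record still has set. A compact sketch of that bookkeeping (array size and names are illustrative):

    #include <linux/types.h>

    #define SKETCH_JOIN_BITS 3                  /* three join-state bits tracked per group */

    struct sketch_membership {
            int members[SKETCH_JOIN_BITS];      /* functions holding each join bit */
            u8 scope_join_state;                /* scope in the high nibble, join state low */
    };

    /* mirrors adjust_membership(): bump or drop one counter per set bit */
    static void sketch_adjust(struct sketch_membership *m, u8 join_state, int inc)
    {
            int i;

            for (i = 0; i < SKETCH_JOIN_BITS; i++)
                    if (join_state & (1 << i))
                            m->members[i] += inc;
    }

    /* mirrors get_leave_state(): bits nobody holds, masked by what the record has set */
    static u8 sketch_leave_state(const struct sketch_membership *m)
    {
            u8 leave_state = 0;
            int i;

            for (i = 0; i < SKETCH_JOIN_BITS; i++)
                    if (!m->members[i])
                            leave_state |= 1 << i;
            return leave_state & (m->scope_join_state & 7);
    }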
491 static int join_group(struct mcast_group *group, int slave, u8 join_mask) in join_group() argument
497 join_state = join_mask & (~group->func[slave].join_state); in join_group()
498 adjust_membership(group, join_state, 1); in join_group()
499 group->func[slave].join_state |= join_state; in join_group()
500 if (group->func[slave].state != MCAST_MEMBER && join_state) { in join_group()
501 group->func[slave].state = MCAST_MEMBER; in join_group()
507 static int leave_group(struct mcast_group *group, int slave, u8 leave_state) in leave_group() argument
511 adjust_membership(group, leave_state, -1); in leave_group()
512 group->func[slave].join_state &= ~leave_state; in leave_group()
513 if (!group->func[slave].join_state) { in leave_group()
514 group->func[slave].state = MCAST_NOT_MEMBER; in leave_group()
520 static int check_leave(struct mcast_group *group, int slave, u8 leave_mask) in check_leave() argument
522 if (group->func[slave].state != MCAST_MEMBER) in check_leave()
526 if (~group->func[slave].join_state & leave_mask) in check_leave()
538 struct mcast_group *group; in mlx4_ib_mcg_timeout_handler() local
541 group = container_of(delay, typeof(*group), timeout_work); in mlx4_ib_mcg_timeout_handler()
543 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
544 if (group->state == MCAST_JOIN_SENT) { in mlx4_ib_mcg_timeout_handler()
545 if (!list_empty(&group->pending_list)) { in mlx4_ib_mcg_timeout_handler()
546 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); in mlx4_ib_mcg_timeout_handler()
549 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_timeout_handler()
550 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
552 if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) { in mlx4_ib_mcg_timeout_handler()
553 if (release_group(group, 1)) in mlx4_ib_mcg_timeout_handler()
556 kfree(group); in mlx4_ib_mcg_timeout_handler()
559 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
561 mcg_warn_group(group, "DRIVER BUG\n"); in mlx4_ib_mcg_timeout_handler()
562 } else if (group->state == MCAST_LEAVE_SENT) { in mlx4_ib_mcg_timeout_handler()
563 if (group->rec.scope_join_state & 7) in mlx4_ib_mcg_timeout_handler()
564 group->rec.scope_join_state &= 0xf8; in mlx4_ib_mcg_timeout_handler()
565 group->state = MCAST_IDLE; in mlx4_ib_mcg_timeout_handler()
566 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
567 if (release_group(group, 1)) in mlx4_ib_mcg_timeout_handler()
569 mutex_lock(&group->lock); in mlx4_ib_mcg_timeout_handler()
571 mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state)); in mlx4_ib_mcg_timeout_handler()
572 group->state = MCAST_IDLE; in mlx4_ib_mcg_timeout_handler()
573 atomic_inc(&group->refcount); in mlx4_ib_mcg_timeout_handler()
574 if (!queue_work(group->demux->mcg_wq, &group->work)) in mlx4_ib_mcg_timeout_handler()
575 safe_atomic_dec(&group->refcount); in mlx4_ib_mcg_timeout_handler()
577 mutex_unlock(&group->lock); in mlx4_ib_mcg_timeout_handler()
580 static int handle_leave_req(struct mcast_group *group, u8 leave_mask, in handle_leave_req() argument
586 leave_mask = group->func[req->func].join_state; in handle_leave_req()
588 status = check_leave(group, req->func, leave_mask); in handle_leave_req()
590 leave_group(group, req->func, leave_mask); in handle_leave_req()
593 send_reply_to_slave(req->func, group, &req->sa_mad, status); in handle_leave_req()
594 --group->func[req->func].num_pend_reqs; in handle_leave_req()
601 static int handle_join_req(struct mcast_group *group, u8 join_mask, in handle_join_req() argument
604 u8 group_join_state = group->rec.scope_join_state & 7; in handle_join_req()
611 status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask); in handle_join_req()
613 join_group(group, req->func, join_mask); in handle_join_req()
615 --group->func[req->func].num_pend_reqs; in handle_join_req()
616 send_reply_to_slave(req->func, group, &req->sa_mad, status); in handle_join_req()
623 group->prev_state = group->state; in handle_join_req()
624 if (send_join_to_wire(group, &req->sa_mad)) { in handle_join_req()
625 --group->func[req->func].num_pend_reqs; in handle_join_req()
630 group->state = group->prev_state; in handle_join_req()
632 group->state = MCAST_JOIN_SENT; in handle_join_req()
640 struct mcast_group *group; in mlx4_ib_mcg_work_handler() local
648 group = container_of(work, typeof(*group), work); in mlx4_ib_mcg_work_handler()
650 mutex_lock(&group->lock); in mlx4_ib_mcg_work_handler()
656 if (group->state == MCAST_RESP_READY) { in mlx4_ib_mcg_work_handler()
658 cancel_delayed_work(&group->timeout_work); in mlx4_ib_mcg_work_handler()
659 status = be16_to_cpu(group->response_sa_mad.mad_hdr.status); in mlx4_ib_mcg_work_handler()
660 method = group->response_sa_mad.mad_hdr.method; in mlx4_ib_mcg_work_handler()
661 if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) { in mlx4_ib_mcg_work_handler()
662 …mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, g… in mlx4_ib_mcg_work_handler()
663 be64_to_cpu(group->response_sa_mad.mad_hdr.tid), in mlx4_ib_mcg_work_handler()
664 be64_to_cpu(group->last_req_tid)); in mlx4_ib_mcg_work_handler()
665 group->state = group->prev_state; in mlx4_ib_mcg_work_handler()
669 if (!list_empty(&group->pending_list)) in mlx4_ib_mcg_work_handler()
670 req = list_first_entry(&group->pending_list, in mlx4_ib_mcg_work_handler()
674 send_reply_to_slave(req->func, group, &req->sa_mad, status); in mlx4_ib_mcg_work_handler()
675 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_work_handler()
681 mcg_warn_group(group, "no request for failed join\n"); in mlx4_ib_mcg_work_handler()
682 } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing) in mlx4_ib_mcg_work_handler()
689 group->response_sa_mad.data)->scope_join_state & 7; in mlx4_ib_mcg_work_handler()
690 cur_join_state = group->rec.scope_join_state & 7; in mlx4_ib_mcg_work_handler()
698 memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec); in mlx4_ib_mcg_work_handler()
700 group->state = MCAST_IDLE; in mlx4_ib_mcg_work_handler()
705 while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) { in mlx4_ib_mcg_work_handler()
706 req = list_first_entry(&group->pending_list, struct mcast_req, in mlx4_ib_mcg_work_handler()
715 rc += handle_leave_req(group, req_join_state, req); in mlx4_ib_mcg_work_handler()
717 rc += handle_join_req(group, req_join_state, req); in mlx4_ib_mcg_work_handler()
721 if (group->state == MCAST_IDLE) { in mlx4_ib_mcg_work_handler()
722 req_join_state = get_leave_state(group); in mlx4_ib_mcg_work_handler()
724 group->rec.scope_join_state &= ~req_join_state; in mlx4_ib_mcg_work_handler()
725 group->prev_state = group->state; in mlx4_ib_mcg_work_handler()
726 if (send_leave_to_wire(group, req_join_state)) { in mlx4_ib_mcg_work_handler()
727 group->state = group->prev_state; in mlx4_ib_mcg_work_handler()
730 group->state = MCAST_LEAVE_SENT; in mlx4_ib_mcg_work_handler()
734 if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) in mlx4_ib_mcg_work_handler()
736 mutex_unlock(&group->lock); in mlx4_ib_mcg_work_handler()
739 release_group(group, 0); in mlx4_ib_mcg_work_handler()
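
Read together, the join/leave senders, the timeout handler and this work handler drive a small per-group state machine over the states named in the listing. A hedged summary, reconstructed only from the transitions visible above (not a verbatim copy of the driver's enum):

    enum sketch_mcast_state {
            SKETCH_MCAST_IDLE,                  /* nothing outstanding on the wire */
            SKETCH_MCAST_JOIN_SENT,             /* join forwarded to the SA, timeout armed */
            SKETCH_MCAST_LEAVE_SENT,            /* leave forwarded to the SA, timeout armed */
            SKETCH_MCAST_RESP_READY,            /* SA response stored for the work handler */
    };

    /*
     * IDLE       -> JOIN_SENT  : handle_join_req() had to forward the join
     * IDLE       -> LEAVE_SENT : the work handler sent the aggregated leave
     * *_SENT     -> RESP_READY : the demux handler stored a matching SA response
     * RESP_READY -> IDLE       : the work handler consumed the response (or back
     *                            to prev_state on a TID mismatch)
     * *_SENT     -> IDLE       : the timeout handler gave up waiting
     */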
746 struct mcast_group *group = NULL, *cur_group; in search_relocate_mgid0_group() local
753 group = list_entry(pos, struct mcast_group, mgid0_list); in search_relocate_mgid0_group()
754 mutex_lock(&group->lock); in search_relocate_mgid0_group()
755 if (group->last_req_tid == tid) { in search_relocate_mgid0_group()
757 group->rec.mgid = *new_mgid; in search_relocate_mgid0_group()
758 sprintf(group->name, "%016llx%016llx", in search_relocate_mgid0_group()
759 be64_to_cpu(group->rec.mgid.global.subnet_prefix), in search_relocate_mgid0_group()
760 be64_to_cpu(group->rec.mgid.global.interface_id)); in search_relocate_mgid0_group()
761 list_del_init(&group->mgid0_list); in search_relocate_mgid0_group()
762 cur_group = mcast_insert(ctx, group); in search_relocate_mgid0_group()
765 req = list_first_entry(&group->pending_list, in search_relocate_mgid0_group()
767 --group->func[req->func].num_pend_reqs; in search_relocate_mgid0_group()
771 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
773 release_group(group, 0); in search_relocate_mgid0_group()
777 atomic_inc(&group->refcount); in search_relocate_mgid0_group()
778 add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in search_relocate_mgid0_group()
779 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
781 return group; in search_relocate_mgid0_group()
785 list_del(&group->mgid0_list); in search_relocate_mgid0_group()
786 if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE) in search_relocate_mgid0_group()
787 cancel_delayed_work_sync(&group->timeout_work); in search_relocate_mgid0_group()
789 list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) { in search_relocate_mgid0_group()
793 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
795 kfree(group); in search_relocate_mgid0_group()
799 mutex_unlock(&group->lock); in search_relocate_mgid0_group()
813 struct mcast_group *group, *cur_group; in acquire_group() local
819 group = mcast_find(ctx, mgid); in acquire_group()
820 if (group) in acquire_group()
827 group = kzalloc(sizeof *group, gfp_mask); in acquire_group()
828 if (!group) in acquire_group()
831 group->demux = ctx; in acquire_group()
832 group->rec.mgid = *mgid; in acquire_group()
833 INIT_LIST_HEAD(&group->pending_list); in acquire_group()
834 INIT_LIST_HEAD(&group->mgid0_list); in acquire_group()
836 INIT_LIST_HEAD(&group->func[i].pending); in acquire_group()
837 INIT_WORK(&group->work, mlx4_ib_mcg_work_handler); in acquire_group()
838 INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler); in acquire_group()
839 mutex_init(&group->lock); in acquire_group()
840 sprintf(group->name, "%016llx%016llx", in acquire_group()
841 be64_to_cpu(group->rec.mgid.global.subnet_prefix), in acquire_group()
842 be64_to_cpu(group->rec.mgid.global.interface_id)); in acquire_group()
843 sysfs_attr_init(&group->dentry.attr); in acquire_group()
844 group->dentry.show = sysfs_show_group; in acquire_group()
845 group->dentry.store = NULL; in acquire_group()
846 group->dentry.attr.name = group->name; in acquire_group()
847 group->dentry.attr.mode = 0400; in acquire_group()
848 group->state = MCAST_IDLE; in acquire_group()
851 list_add(&group->mgid0_list, &ctx->mcg_mgid0_list); in acquire_group()
855 cur_group = mcast_insert(ctx, group); in acquire_group()
858 kfree(group); in acquire_group()
862 add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr); in acquire_group()
865 atomic_inc(&group->refcount); in acquire_group()
866 return group; in acquire_group()
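
acquire_group() is a find-or-create over the same rb-tree: look the MGID up first, and only on a miss (when creation is allowed) allocate a zeroed group, initialize its lists, work items, mutex, name and sysfs attribute, insert it, and hand the caller a reference. A condensed sketch of that shape, with the error paths and the all-zero-MGID special case left out, and with the tree helpers assumed to look like the earlier rb-tree sketches:

    #include <linux/rbtree.h>
    #include <linux/slab.h>
    #include <linux/atomic.h>
    #include <linux/string.h>
    #include <linux/err.h>
    #include <linux/types.h>

    struct sketch_mcg {                         /* illustrative, not the driver's struct */
            struct rb_node node;
            atomic_t refcount;
            u8 mgid[16];
    };

    struct sketch_mcg *sketch_mcg_find(struct rb_root *root, const u8 *mgid);
    struct sketch_mcg *sketch_mcg_insert(struct rb_root *root, struct sketch_mcg *n);

    static struct sketch_mcg *sketch_acquire(struct rb_root *root, const u8 *mgid,
                                             gfp_t gfp_mask)
    {
            struct sketch_mcg *n, *dup;

            n = sketch_mcg_find(root, mgid);
            if (n)
                    goto found;

            n = kzalloc(sizeof(*n), gfp_mask);
            if (!n)
                    return ERR_PTR(-ENOMEM);
            memcpy(n->mgid, mgid, sizeof(n->mgid));
            /* ... INIT_LIST_HEAD(), INIT_WORK(), INIT_DELAYED_WORK(), mutex_init(),
             *     name and sysfs attribute setup, as in the lines above ... */
            dup = sketch_mcg_insert(root, n);
            if (dup) {                          /* unexpected: the MGID appeared meanwhile */
                    kfree(n);
                    return ERR_PTR(-EINVAL);
            }
    found:
            atomic_inc(&n->refcount);           /* reference handed back to the caller */
            return n;
    }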
871 struct mcast_group *group = req->group; in queue_req() local
873 atomic_inc(&group->refcount); /* for the request */ in queue_req()
874 atomic_inc(&group->refcount); /* for scheduling the work */ in queue_req()
875 list_add_tail(&req->group_list, &group->pending_list); in queue_req()
876 list_add_tail(&req->func_list, &group->func[req->func].pending); in queue_req()
878 if (!queue_work(group->demux->mcg_wq, &group->work)) in queue_req()
879 safe_atomic_dec(&group->refcount); in queue_req()
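
queue_req() takes two references up front, one owned by the queued request and one for the scheduled work run; queue_work() returns false when the work item is already pending, in which case the scheduling reference is dropped again, which is what safe_atomic_dec() is used for here and in the handlers above. A minimal sketch of that scheduling idiom:

    #include <linux/workqueue.h>
    #include <linux/atomic.h>

    struct sketch_owner {                       /* illustrative container */
            atomic_t refcount;
            struct work_struct work;
            struct workqueue_struct *wq;
    };

    static void sketch_schedule(struct sketch_owner *o)
    {
            atomic_inc(&o->refcount);           /* reference for the work run */
            if (!queue_work(o->wq, &o->work))
                    atomic_dec(&o->refcount);   /* already queued: that run covers it */
    }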
888 struct mcast_group *group; in mlx4_ib_mcg_demux_handler() local
894 group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL); in mlx4_ib_mcg_demux_handler()
896 if (IS_ERR(group)) { in mlx4_ib_mcg_demux_handler()
900 group = search_relocate_mgid0_group(ctx, tid, &rec->mgid); in mlx4_ib_mcg_demux_handler()
902 group = NULL; in mlx4_ib_mcg_demux_handler()
905 if (!group) in mlx4_ib_mcg_demux_handler()
908 mutex_lock(&group->lock); in mlx4_ib_mcg_demux_handler()
909 group->response_sa_mad = *mad; in mlx4_ib_mcg_demux_handler()
910 group->prev_state = group->state; in mlx4_ib_mcg_demux_handler()
911 group->state = MCAST_RESP_READY; in mlx4_ib_mcg_demux_handler()
913 atomic_inc(&group->refcount); in mlx4_ib_mcg_demux_handler()
914 if (!queue_work(ctx->mcg_wq, &group->work)) in mlx4_ib_mcg_demux_handler()
915 safe_atomic_dec(&group->refcount); in mlx4_ib_mcg_demux_handler()
916 mutex_unlock(&group->lock); in mlx4_ib_mcg_demux_handler()
917 release_group(group, 0); in mlx4_ib_mcg_demux_handler()
937 struct mcast_group *group; in mlx4_ib_mcg_multiplex_handler() local
956 group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL); in mlx4_ib_mcg_multiplex_handler()
958 if (IS_ERR(group)) { in mlx4_ib_mcg_multiplex_handler()
960 return PTR_ERR(group); in mlx4_ib_mcg_multiplex_handler()
962 mutex_lock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
963 if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) { in mlx4_ib_mcg_multiplex_handler()
964 mutex_unlock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
965 mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n", in mlx4_ib_mcg_multiplex_handler()
967 release_group(group, 0); in mlx4_ib_mcg_multiplex_handler()
971 ++group->func[slave].num_pend_reqs; in mlx4_ib_mcg_multiplex_handler()
972 req->group = group; in mlx4_ib_mcg_multiplex_handler()
974 mutex_unlock(&group->lock); in mlx4_ib_mcg_multiplex_handler()
975 release_group(group, 0); in mlx4_ib_mcg_multiplex_handler()
992 struct mcast_group *group = in sysfs_show_group() local
1000 if (group->state == MCAST_IDLE) in sysfs_show_group()
1001 sprintf(state_str, "%s", get_state_string(group->state)); in sysfs_show_group()
1004 get_state_string(group->state), in sysfs_show_group()
1005 be64_to_cpu(group->last_req_tid)); in sysfs_show_group()
1006 if (list_empty(&group->pending_list)) { in sysfs_show_group()
1009 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); in sysfs_show_group()
1014 group->rec.scope_join_state & 0xf, in sysfs_show_group()
1015 group->members[2], group->members[1], group->members[0], in sysfs_show_group()
1016 atomic_read(&group->refcount), in sysfs_show_group()
1020 if (group->func[f].state == MCAST_MEMBER) in sysfs_show_group()
1022 f, group->func[f].join_state); in sysfs_show_group()
1026 be16_to_cpu(group->rec.pkey), in sysfs_show_group()
1027 be32_to_cpu(group->rec.qkey), in sysfs_show_group()
1028 (group->rec.mtusel_mtu & 0xc0) >> 6, in sysfs_show_group()
1029 group->rec.mtusel_mtu & 0x3f, in sysfs_show_group()
1030 group->rec.tclass, in sysfs_show_group()
1031 (group->rec.ratesel_rate & 0xc0) >> 6, in sysfs_show_group()
1032 group->rec.ratesel_rate & 0x3f, in sysfs_show_group()
1033 (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28, in sysfs_show_group()
1034 (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8, in sysfs_show_group()
1035 be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff, in sysfs_show_group()
1036 group->rec.proxy_join); in sysfs_show_group()
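
The sysfs_show_group() lines also document how the record fields are packed: mtusel_mtu and ratesel_rate carry a 2-bit selector in the top two bits and a 6-bit value below, and sl_flowlabel_hoplimit packs SL (4 bits), flow label (20 bits) and hop limit (8 bits) into one big-endian 32-bit word. A small decode sketch using exactly the shifts and masks shown above:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct sketch_rec_bits {
            u8 mtu_sel, mtu, rate_sel, rate, sl, hop_limit;
            u32 flow_label;
    };

    static void sketch_decode(struct sketch_rec_bits *out, u8 mtusel_mtu,
                              u8 ratesel_rate, __be32 sl_flowlabel_hoplimit)
    {
            u32 sfh = be32_to_cpu(sl_flowlabel_hoplimit);

            out->mtu_sel    = (mtusel_mtu & 0xc0) >> 6;     /* 2-bit selector */
            out->mtu        = mtusel_mtu & 0x3f;            /* 6-bit MTU code */
            out->rate_sel   = (ratesel_rate & 0xc0) >> 6;
            out->rate       = ratesel_rate & 0x3f;
            out->sl         = (sfh & 0xf0000000) >> 28;     /* top 4 bits */
            out->flow_label = (sfh & 0x0fffff00) >> 8;      /* middle 20 bits */
            out->hop_limit  = sfh & 0x000000ff;             /* low 8 bits */
    }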
1059 static void force_clean_group(struct mcast_group *group) in force_clean_group() argument
1063 list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) { in force_clean_group()
1067 del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr); in force_clean_group()
1068 rb_erase(&group->node, &group->demux->mcg_table); in force_clean_group()
1069 kfree(group); in force_clean_group()
1076 struct mcast_group *group; in _mlx4_ib_mcg_port_cleanup() local
1102 group = rb_entry(p, struct mcast_group, node); in _mlx4_ib_mcg_port_cleanup()
1103 if (atomic_read(&group->refcount)) in _mlx4_ib_mcg_port_cleanup()
1104 …mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group); in _mlx4_ib_mcg_port_cleanup()
1106 force_clean_group(group); in _mlx4_ib_mcg_port_cleanup()
1162 static void clear_pending_reqs(struct mcast_group *group, int vf) in clear_pending_reqs() argument
1168 if (!list_empty(&group->pending_list)) in clear_pending_reqs()
1169 group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list); in clear_pending_reqs()
1171 list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) { in clear_pending_reqs()
1174 (group->state == MCAST_JOIN_SENT || in clear_pending_reqs()
1175 group->state == MCAST_LEAVE_SENT)) { in clear_pending_reqs()
1176 clear = cancel_delayed_work(&group->timeout_work); in clear_pending_reqs()
1178 group->state = MCAST_IDLE; in clear_pending_reqs()
1181 --group->func[vf].num_pend_reqs; in clear_pending_reqs()
1185 atomic_dec(&group->refcount); in clear_pending_reqs()
1189 if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) { in clear_pending_reqs()
1190 mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n", in clear_pending_reqs()
1191 list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs); in clear_pending_reqs()
1195 static int push_deleteing_req(struct mcast_group *group, int slave) in push_deleteing_req() argument
1200 if (!group->func[slave].join_state) in push_deleteing_req()
1205 mcg_warn_group(group, "failed allocation - may leave stall groups\n"); in push_deleteing_req()
1209 if (!list_empty(&group->func[slave].pending)) { in push_deleteing_req()
1210 pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list); in push_deleteing_req()
1219 req->group = group; in push_deleteing_req()
1220 ++group->func[slave].num_pend_reqs; in push_deleteing_req()
1228 struct mcast_group *group; in clean_vf_mcast() local
1233 group = rb_entry(p, struct mcast_group, node); in clean_vf_mcast()
1234 mutex_lock(&group->lock); in clean_vf_mcast()
1235 if (atomic_read(&group->refcount)) { in clean_vf_mcast()
1237 clear_pending_reqs(group, slave); in clean_vf_mcast()
1238 push_deleteing_req(group, slave); in clean_vf_mcast()
1240 mutex_unlock(&group->lock); in clean_vf_mcast()