Lines matching refs:group — identifier-search hits for `group` in the kernel's InfiniBand SA multicast code. Each hit shows the file's own line number, the matching source line, and the enclosing function or the symbol's role (member, local, argument).
118 struct mcast_group *group; member
134 struct mcast_group *group; in mcast_find() local
138 group = rb_entry(node, struct mcast_group, node); in mcast_find()
139 ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid); in mcast_find()
141 return group; in mcast_find()
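
The lookup at lines 134-141 is a standard red-black-tree descent keyed on the raw MGID bytes. A minimal sketch of the same walk, reconstructed from the visible hits (the branch direction on the memcmp() result is an assumption; the caller is presumed to hold port->lock):

    static struct mcast_group *mcast_find_sketch(struct mcast_port *port,
                                                 union ib_gid *mgid)
    {
            struct rb_node *node = port->table.rb_node;
            struct mcast_group *group;
            int ret;

            while (node) {
                    group = rb_entry(node, struct mcast_group, node);
                    ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
                    if (!ret)
                            return group;           /* exact MGID match */
                    node = ret < 0 ? node->rb_left : node->rb_right;
            }
            return NULL;
    }
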
152 struct mcast_group *group, in mcast_insert() argument
164 ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw, in mcast_insert()
165 sizeof group->rec.mgid); in mcast_insert()
175 rb_link_node(&group->node, parent, link); in mcast_insert()
176 rb_insert_color(&group->node, &port->table); in mcast_insert()
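
mcast_insert() (lines 152-176) is the matching insertion: descend to a leaf while remembering the parent and the link slot, then splice the new node in and rebalance. A sketch of that idiom; the function's extra allow_mgid0 argument and its special-casing are omitted here (an assumption on my part):

    static struct mcast_group *mcast_insert_sketch(struct mcast_port *port,
                                                   struct mcast_group *group)
    {
            struct rb_node **link = &port->table.rb_node;
            struct rb_node *parent = NULL;
            struct mcast_group *cur_group;
            int ret;

            while (*link) {
                    parent = *link;
                    cur_group = rb_entry(parent, struct mcast_group, node);
                    ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
                                 sizeof group->rec.mgid);
                    if (ret < 0)
                            link = &(*link)->rb_left;
                    else if (ret > 0)
                            link = &(*link)->rb_right;
                    else
                            return cur_group;       /* already present */
            }
            rb_link_node(&group->node, parent, link);
            rb_insert_color(&group->node, &port->table);
            return NULL;                            /* inserted */
    }

Returning the existing group on a duplicate key is what lets acquire_group() (lines 592-595 below) resolve an insertion race by discarding its fresh allocation.
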
186 static void release_group(struct mcast_group *group) in release_group() argument
188 struct mcast_port *port = group->port; in release_group()
192 if (atomic_dec_and_test(&group->refcount)) { in release_group()
193 rb_erase(&group->node, &port->table); in release_group()
195 kfree(group); in release_group()
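
release_group() (lines 186-195) drops one reference under the port lock; whoever drops the last reference unlinks the group from the tree and frees it. A sketch of that pattern (the per-port reference release after kfree() is my assumption):

    static void release_group_sketch(struct mcast_group *group)
    {
            struct mcast_port *port = group->port;
            unsigned long flags;

            spin_lock_irqsave(&port->lock, flags);
            if (atomic_dec_and_test(&group->refcount)) {
                    rb_erase(&group->node, &port->table);
                    spin_unlock_irqrestore(&port->lock, flags);
                    kfree(group);
                    /* presumably also drops a per-port reference here */
            } else
                    spin_unlock_irqrestore(&port->lock, flags);
    }
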
209 struct mcast_group *group = member->group; in queue_join() local
212 spin_lock_irqsave(&group->lock, flags); in queue_join()
213 list_add_tail(&member->list, &group->pending_list); in queue_join()
214 if (group->state == MCAST_IDLE) { in queue_join()
215 group->state = MCAST_BUSY; in queue_join()
216 atomic_inc(&group->refcount); in queue_join()
217 queue_work(mcast_wq, &group->work); in queue_join()
219 spin_unlock_irqrestore(&group->lock, flags); in queue_join()
228 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc) in adjust_membership() argument
234 group->members[i] += inc; in adjust_membership()
243 static u8 get_leave_state(struct mcast_group *group) in get_leave_state() argument
249 if (!group->members[i]) in get_leave_state()
252 return leave_state & group->rec.join_state; in get_leave_state()
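
adjust_membership() and get_leave_state() (lines 228-252) keep one counter per IB join-state bit (full / non / send-only membership), so a leave is only sent for bits whose counter has dropped to zero. Sketches of both, assuming three membership types as the loop bound:

    static void adjust_membership_sketch(struct mcast_group *group,
                                         u8 join_state, int inc)
    {
            int i;

            for (i = 0; i < 3; i++, join_state >>= 1)
                    if (join_state & 0x1)
                            group->members[i] += inc;
    }

    static u8 get_leave_state_sketch(struct mcast_group *group)
    {
            u8 leave_state = 0;
            int i;

            for (i = 0; i < 3; i++)
                    if (!group->members[i])
                            leave_state |= (0x1 << i);

            /* only leave states the group actually joined with */
            return leave_state & group->rec.join_state;
    }
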
330 static int send_join(struct mcast_group *group, struct mcast_member *member) in send_join() argument
332 struct mcast_port *port = group->port; in send_join()
335 group->last_join = member; in send_join()
340 3000, GFP_KERNEL, join_handler, group, in send_join()
341 &group->query); in send_join()
343 group->query_id = ret; in send_join()
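
send_join() (lines 330-343) fires an asynchronous SA MCMemberRecord SET; the non-negative query id returned by ib_sa_mcmember_rec_query() is stashed so the outstanding query can be cancelled later. A hedged reconstruction (sa_client, the IB_MGMT_METHOD_SET constant, and the comp_mask source are assumptions based on the usual ib_sa API of this era):

    static int send_join_sketch(struct mcast_group *group,
                                struct mcast_member *member)
    {
            struct mcast_port *port = group->port;
            int ret;

            group->last_join = member;
            ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
                                           port->port_num, IB_MGMT_METHOD_SET,
                                           &member->multicast.rec,
                                           member->multicast.comp_mask,
                                           3000, GFP_KERNEL, join_handler,
                                           group, &group->query);
            if (ret >= 0) {
                    group->query_id = ret;          /* id for later cancel */
                    ret = 0;
            }
            return ret;
    }

send_leave() (lines 349-367, next) follows the same shape with a copy of the group record, the computed leave_state, and leave_handler as the completion callback.
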
349 static int send_leave(struct mcast_group *group, u8 leave_state) in send_leave() argument
351 struct mcast_port *port = group->port; in send_leave()
355 rec = group->rec; in send_leave()
357 group->leave_state = leave_state; in send_leave()
365 group, &group->query); in send_leave()
367 group->query_id = ret; in send_leave()
373 static void join_group(struct mcast_group *group, struct mcast_member *member, in join_group() argument
377 adjust_membership(group, join_state, 1); in join_group()
378 group->rec.join_state |= join_state; in join_group()
379 member->multicast.rec = group->rec; in join_group()
381 list_move(&member->list, &group->active_list); in join_group()
384 static int fail_join(struct mcast_group *group, struct mcast_member *member, in fail_join() argument
387 spin_lock_irq(&group->lock); in fail_join()
389 spin_unlock_irq(&group->lock); in fail_join()
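
join_group() and fail_join() (lines 373-389) are the two outcomes of a pending join: on success the member inherits the group record and moves to the active list; on failure it is unlinked and its callback is invoked with the error. Sketches (member state bookkeeping and the callback signature are assumptions):

    static void join_group_sketch(struct mcast_group *group,
                                  struct mcast_member *member, u8 join_state)
    {
            adjust_membership(group, join_state, 1);
            group->rec.join_state |= join_state;
            member->multicast.rec = group->rec;
            member->multicast.rec.join_state = join_state;
            list_move(&member->list, &group->active_list);
    }

    static int fail_join_sketch(struct mcast_group *group,
                                struct mcast_member *member, int status)
    {
            spin_lock_irq(&group->lock);
            list_del_init(&member->list);
            spin_unlock_irq(&group->lock);
            return member->multicast.callback(status, &member->multicast);
    }
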
393 static void process_group_error(struct mcast_group *group) in process_group_error() argument
399 if (group->state == MCAST_PKEY_EVENT) in process_group_error()
400 ret = ib_find_pkey(group->port->dev->device, in process_group_error()
401 group->port->port_num, in process_group_error()
402 be16_to_cpu(group->rec.pkey), &pkey_index); in process_group_error()
404 spin_lock_irq(&group->lock); in process_group_error()
405 if (group->state == MCAST_PKEY_EVENT && !ret && in process_group_error()
406 group->pkey_index == pkey_index) in process_group_error()
409 while (!list_empty(&group->active_list)) { in process_group_error()
410 member = list_entry(group->active_list.next, in process_group_error()
414 adjust_membership(group, member->multicast.rec.join_state, -1); in process_group_error()
416 spin_unlock_irq(&group->lock); in process_group_error()
423 spin_lock_irq(&group->lock); in process_group_error()
426 group->rec.join_state = 0; in process_group_error()
428 group->state = MCAST_BUSY; in process_group_error()
429 spin_unlock_irq(&group->lock); in process_group_error()
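
process_group_error() (lines 393-429) handles P_Key-change and error events. A P_Key event whose freshly looked-up pkey_index still matches the cached one is a no-op (lines 405-407); otherwise every active member is unlinked, its membership counts reverted, and its callback told the network was reset. A condensed sketch of the drain loop, with the pkey short-circuit and per-member refcounting elided (the -ENETRESET status is an assumption):

    static void process_group_error_sketch(struct mcast_group *group)
    {
            struct mcast_member *member;

            spin_lock_irq(&group->lock);
            while (!list_empty(&group->active_list)) {
                    member = list_entry(group->active_list.next,
                                        struct mcast_member, list);
                    list_del_init(&member->list);
                    adjust_membership(group,
                                      member->multicast.rec.join_state, -1);
                    spin_unlock_irq(&group->lock);  /* callback may sleep */
                    member->multicast.callback(-ENETRESET,
                                               &member->multicast);
                    spin_lock_irq(&group->lock);
            }
            group->rec.join_state = 0;              /* nothing joined now */
            group->state = MCAST_BUSY;              /* work handler resumes */
            spin_unlock_irq(&group->lock);
    }
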
434 struct mcast_group *group; in mcast_work_handler() local
440 group = container_of(work, typeof(*group), work); in mcast_work_handler()
442 spin_lock_irq(&group->lock); in mcast_work_handler()
443 while (!list_empty(&group->pending_list) || in mcast_work_handler()
444 (group->state != MCAST_BUSY)) { in mcast_work_handler()
446 if (group->state != MCAST_BUSY) { in mcast_work_handler()
447 spin_unlock_irq(&group->lock); in mcast_work_handler()
448 process_group_error(group); in mcast_work_handler()
452 member = list_entry(group->pending_list.next, in mcast_work_handler()
458 if (join_state == (group->rec.join_state & join_state)) { in mcast_work_handler()
459 status = cmp_rec(&group->rec, &multicast->rec, in mcast_work_handler()
462 join_group(group, member, join_state); in mcast_work_handler()
465 spin_unlock_irq(&group->lock); in mcast_work_handler()
468 spin_unlock_irq(&group->lock); in mcast_work_handler()
469 status = send_join(group, member); in mcast_work_handler()
474 ret = fail_join(group, member, status); in mcast_work_handler()
480 spin_lock_irq(&group->lock); in mcast_work_handler()
483 join_state = get_leave_state(group); in mcast_work_handler()
485 group->rec.join_state &= ~join_state; in mcast_work_handler()
486 spin_unlock_irq(&group->lock); in mcast_work_handler()
487 if (send_leave(group, join_state)) in mcast_work_handler()
490 group->state = MCAST_IDLE; in mcast_work_handler()
491 spin_unlock_irq(&group->lock); in mcast_work_handler()
492 release_group(group); in mcast_work_handler()
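
mcast_work_handler() (lines 434-492) is the group's single-threaded state machine: under group->lock it drains pending joins, completing a join locally when the group already covers the requested join_state (lines 458-462) and otherwise dropping the lock to issue a real SA join (line 469). When the pending list empties it computes any needed leave, either sends it or marks the group MCAST_IDLE, and drops the reference queue_join() took. A condensed skeleton; the error-event path, the cmp_rec() parameter check, and the member callbacks are elided as assumptions:

    static void mcast_work_handler_sketch(struct work_struct *work)
    {
            struct mcast_group *group = container_of(work, typeof(*group),
                                                     work);
            struct mcast_member *member;
            u8 join_state;
            int status;

    retest:
            spin_lock_irq(&group->lock);
            while (!list_empty(&group->pending_list)) {
                    member = list_entry(group->pending_list.next,
                                        struct mcast_member, list);
                    join_state = member->multicast.rec.join_state;
                    if ((group->rec.join_state & join_state) == join_state) {
                            /* group already covers this state: join locally */
                            join_group(group, member, join_state);
                            continue;
                    }
                    spin_unlock_irq(&group->lock);
                    status = send_join(group, member);
                    if (!status)
                            return;                 /* join_handler re-enters */
                    fail_join(group, member, status);
                    goto retest;
            }

            join_state = get_leave_state(group);
            if (join_state) {
                    group->rec.join_state &= ~join_state;
                    spin_unlock_irq(&group->lock);
                    if (!send_leave(group, join_state))
                            return;                 /* leave_handler re-enters */
                    goto retest;
            }
            group->state = MCAST_IDLE;              /* nothing left to do */
            spin_unlock_irq(&group->lock);
            release_group(group);                   /* ref from queue_join() */
    }
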
499 static void process_join_error(struct mcast_group *group, int status) in process_join_error() argument
504 spin_lock_irq(&group->lock); in process_join_error()
505 member = list_entry(group->pending_list.next, in process_join_error()
507 if (group->last_join == member) { in process_join_error()
510 spin_unlock_irq(&group->lock); in process_join_error()
516 spin_unlock_irq(&group->lock); in process_join_error()
522 struct mcast_group *group = context; in join_handler() local
526 process_join_error(group, status); in join_handler()
529 ib_find_pkey(group->port->dev->device, group->port->port_num, in join_handler()
532 spin_lock_irq(&group->port->lock); in join_handler()
533 if (group->state == MCAST_BUSY && in join_handler()
534 group->pkey_index == MCAST_INVALID_PKEY_INDEX) in join_handler()
535 group->pkey_index = pkey_index; in join_handler()
536 mgids_changed = memcmp(&rec->mgid, &group->rec.mgid, in join_handler()
537 sizeof(group->rec.mgid)); in join_handler()
538 group->rec = *rec; in join_handler()
540 rb_erase(&group->node, &group->port->table); in join_handler()
541 is_mgid0 = !memcmp(&mgid0, &group->rec.mgid, in join_handler()
543 mcast_insert(group->port, group, is_mgid0); in join_handler()
545 spin_unlock_irq(&group->port->lock); in join_handler()
547 mcast_work_handler(&group->work); in join_handler()
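
join_handler() (lines 522-547) is the SA completion callback for a join: on success it caches the pkey_index, copies the returned record into the group, and, if the SA handed back a different MGID (a join on the zero MGID asks the SA to allocate one), re-keys the group in the port's rb-tree. The erase-and-reinsert idiom, as suggested by lines 540-543, with the recomputation of is_mgid0 filled in as an assumption (mgid0 is the file's all-zero GID; group->port->lock is held):

    if (mgids_changed) {
            rb_erase(&group->node, &group->port->table);
            is_mgid0 = !memcmp(&mgid0, &group->rec.mgid, sizeof mgid0);
            mcast_insert(group->port, group, is_mgid0);
    }
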
553 struct mcast_group *group = context; in leave_handler() local
555 if (status && group->retries > 0 && in leave_handler()
556 !send_leave(group, group->leave_state)) in leave_handler()
557 group->retries--; in leave_handler()
559 mcast_work_handler(&group->work); in leave_handler()
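
leave_handler() (lines 553-559) retries a failed leave a bounded number of times (group->retries is initialized to 3 in acquire_group(), line 582) before handing control back to the work handler. A reconstruction, with the callback signature assumed to match the other ib_sa completion handlers:

    static void leave_handler_sketch(int status,
                                     struct ib_sa_mcmember_rec *rec,
                                     void *context)
    {
            struct mcast_group *group = context;

            if (status && group->retries > 0 &&
                !send_leave(group, group->leave_state))
                    group->retries--;               /* resend is in flight */
            else
                    mcast_work_handler(&group->work);
    }
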
565 struct mcast_group *group, *cur_group; in acquire_group() local
572 group = mcast_find(port, mgid); in acquire_group()
573 if (group) in acquire_group()
578 group = kzalloc(sizeof *group, gfp_mask); in acquire_group()
579 if (!group) in acquire_group()
582 group->retries = 3; in acquire_group()
583 group->port = port; in acquire_group()
584 group->rec.mgid = *mgid; in acquire_group()
585 group->pkey_index = MCAST_INVALID_PKEY_INDEX; in acquire_group()
586 INIT_LIST_HEAD(&group->pending_list); in acquire_group()
587 INIT_LIST_HEAD(&group->active_list); in acquire_group()
588 INIT_WORK(&group->work, mcast_work_handler); in acquire_group()
589 spin_lock_init(&group->lock); in acquire_group()
592 cur_group = mcast_insert(port, group, is_mgid0); in acquire_group()
594 kfree(group); in acquire_group()
595 group = cur_group; in acquire_group()
599 atomic_inc(&group->refcount); in acquire_group()
601 return group; in acquire_group()
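
acquire_group() (lines 565-601) is a classic find-or-create: look up under port->lock, allocate and initialize outside the lock if nothing was found, then retake the lock and let mcast_insert() arbitrate the race; if another thread inserted first, the fresh allocation is discarded in favor of the winner (lines 592-595). A condensed sketch, with the mgid0 handling and the per-port refcount elided as assumptions:

    static struct mcast_group *acquire_group_sketch(struct mcast_port *port,
                                                    union ib_gid *mgid,
                                                    gfp_t gfp_mask)
    {
            struct mcast_group *group, *cur_group;
            unsigned long flags;

            spin_lock_irqsave(&port->lock, flags);
            group = mcast_find(port, mgid);
            if (group)
                    goto found;
            spin_unlock_irqrestore(&port->lock, flags);

            group = kzalloc(sizeof *group, gfp_mask);
            if (!group)
                    return NULL;

            group->retries = 3;
            group->port = port;
            group->rec.mgid = *mgid;
            group->pkey_index = MCAST_INVALID_PKEY_INDEX;
            INIT_LIST_HEAD(&group->pending_list);
            INIT_LIST_HEAD(&group->active_list);
            INIT_WORK(&group->work, mcast_work_handler);
            spin_lock_init(&group->lock);

            spin_lock_irqsave(&port->lock, flags);
            cur_group = mcast_insert(port, group, 0);   /* mgid0 case elided */
            if (cur_group) {
                    kfree(group);                   /* lost the insert race */
                    group = cur_group;
            }
    found:
            atomic_inc(&group->refcount);           /* caller's reference */
            spin_unlock_irqrestore(&port->lock, flags);
            return group;
    }
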
643 member->group = acquire_group(&dev->port[port_num - dev->start_port], in ib_sa_join_multicast()
645 if (!member->group) { in ib_sa_join_multicast()
670 struct mcast_group *group; in ib_sa_free_multicast() local
673 group = member->group; in ib_sa_free_multicast()
675 spin_lock_irq(&group->lock); in ib_sa_free_multicast()
677 adjust_membership(group, multicast->rec.join_state, -1); in ib_sa_free_multicast()
681 if (group->state == MCAST_IDLE) { in ib_sa_free_multicast()
682 group->state = MCAST_BUSY; in ib_sa_free_multicast()
683 spin_unlock_irq(&group->lock); in ib_sa_free_multicast()
685 queue_work(mcast_wq, &group->work); in ib_sa_free_multicast()
687 spin_unlock_irq(&group->lock); in ib_sa_free_multicast()
688 release_group(group); in ib_sa_free_multicast()
703 struct mcast_group *group; in ib_sa_get_mcmember_rec() local
713 group = mcast_find(port, mgid); in ib_sa_get_mcmember_rec()
714 if (group) in ib_sa_get_mcmember_rec()
715 *rec = group->rec; in ib_sa_get_mcmember_rec()
758 struct mcast_group *group; in mcast_groups_event() local
764 group = rb_entry(node, struct mcast_group, node); in mcast_groups_event()
765 spin_lock(&group->lock); in mcast_groups_event()
766 if (group->state == MCAST_IDLE) { in mcast_groups_event()
767 atomic_inc(&group->refcount); in mcast_groups_event()
768 queue_work(mcast_wq, &group->work); in mcast_groups_event()
770 if (group->state != MCAST_GROUP_ERROR) in mcast_groups_event()
771 group->state = state; in mcast_groups_event()
772 spin_unlock(&group->lock); in mcast_groups_event()
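
mcast_groups_event() (lines 758-772) fans a device event (P_Key change, port error) out to every group on the port: idle groups get a reference and a queued work item so the state machine re-runs, and any group not already in MCAST_GROUP_ERROR is moved to the event's state. A reconstruction, assuming an rb_first()/rb_next() walk under port->lock:

    static void mcast_groups_event_sketch(struct mcast_port *port,
                                          enum mcast_group_state state)
    {
            struct mcast_group *group;
            struct rb_node *node;
            unsigned long flags;

            spin_lock_irqsave(&port->lock, flags);
            for (node = rb_first(&port->table); node; node = rb_next(node)) {
                    group = rb_entry(node, struct mcast_group, node);
                    spin_lock(&group->lock);
                    if (group->state == MCAST_IDLE) {
                            atomic_inc(&group->refcount);
                            queue_work(mcast_wq, &group->work);
                    }
                    if (group->state != MCAST_GROUP_ERROR)
                            group->state = state;   /* ERROR takes priority */
                    spin_unlock(&group->lock);
            }
            spin_unlock_irqrestore(&port->lock, flags);
    }
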