Cross-reference listing: uses of the identifier "group" in linux-4.1.27, grouped by directory and file. The leading number on each line is the line number in that source file; "in foo()" / "member of struct bar" notes mark where a match sits.

/linux-4.1.27/arch/sparc/kernel/
hvapi.c

   18  unsigned long group;  /* member of struct api_info */
   27  { .group = HV_GRP_SUN4V, .flags = FLAG_PRE_API },
   28  { .group = HV_GRP_CORE, .flags = FLAG_PRE_API },
   29  { .group = HV_GRP_INTR, },
   30  { .group = HV_GRP_SOFT_STATE, },
   31  { .group = HV_GRP_TM, },
   32  { .group = HV_GRP_PCI, .flags = FLAG_PRE_API },
   33  { .group = HV_GRP_LDOM, },
   34  { .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API },
   35  { .group = HV_GRP_NCS, .flags = FLAG_PRE_API },
   36  { .group = HV_GRP_RNG, },
   37  { .group = HV_GRP_PBOOT, },
   38  { .group = HV_GRP_TPM, },
   39  { .group = HV_GRP_SDIO, },
   40  { .group = HV_GRP_SDIO_ERR, },
   41  { .group = HV_GRP_REBOOT_DATA, },
   42  { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
   43  { .group = HV_GRP_FIRE_PERF, },
   44  { .group = HV_GRP_N2_CPU, },
   45  { .group = HV_GRP_NIU, },
   46  { .group = HV_GRP_VF_CPU, },
   47  { .group = HV_GRP_KT_CPU, },
   48  { .group = HV_GRP_VT_CPU, },
   49  { .group = HV_GRP_T5_CPU, },
   50  { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
   51  { .group = HV_GRP_M7_PERF, },
   56  static struct api_info *__get_info(unsigned long group)
   61  if (api_table[i].group == group)
   77  sun4v_set_version(p->group, 0, 0, &ignore);  /* in __put_ref() */
   83  * API group and desired major+minor.
   90  * API group/major/minor with the hypervisor, and errors returned
   93  int sun4v_hvapi_register(unsigned long group, unsigned long major,
  101  p = __get_info(group);
  114  hv_ret = sun4v_set_version(group, major, *minor,
  144  void sun4v_hvapi_unregister(unsigned long group)
  150  p = __get_info(group);
  157  int sun4v_hvapi_get(unsigned long group,
  167  p = __get_info(group);
  181  unsigned long group, major, minor;  /* in sun4v_hvapi_init() */
  183  group = HV_GRP_SUN4V;
  186  if (sun4v_hvapi_register(group, major, &minor))
  189  group = HV_GRP_CORE;
  192  if (sun4v_hvapi_register(group, major, &minor))
  198  prom_printf("HVAPI: Cannot register API group "
  200  group, major, minor);
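The register/unregister pair above is exported for other sparc64 code. As a hedged sketch (modelled loosely on how the n2rng driver negotiates its API version; the example_* names are invented, only the sun4v_hvapi_* calls come from hvapi.c), a driver negotiates a group before using it:

#include <linux/errno.h>
#include <asm/hypervisor.h>

static unsigned long example_rng_minor;

static int example_register_rng_api(void)
{
	unsigned long major = 2;

	/* Fall back to major 1 if the hypervisor rejects major 2. */
	if (sun4v_hvapi_register(HV_GRP_RNG, major, &example_rng_minor)) {
		major = 1;
		if (sun4v_hvapi_register(HV_GRP_RNG, major,
					 &example_rng_minor))
			return -ENODEV;
	}
	return 0;
}

static void example_unregister_rng_api(void)
{
	/* Balances the successful sun4v_hvapi_register() above. */
	sun4v_hvapi_unregister(HV_GRP_RNG);
}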
/linux-4.1.27/fs/notify/
group.c

   32  * Final freeing of a group
   34  static void fsnotify_final_destroy_group(struct fsnotify_group *group)
   36  if (group->ops->free_group_priv)
   37  group->ops->free_group_priv(group);
   39  kfree(group);
   43  * Trying to get rid of a group. Remove all marks, flush all events and release
   44  * the group reference.
   46  * hold a ref to the group.
   48  void fsnotify_destroy_group(struct fsnotify_group *group)
   50  /* clear all inode marks for this group */
   51  fsnotify_clear_marks_by_group(group);
   56  fsnotify_flush_notify(group);
   62  if (group->overflow_event)
   63  group->ops->free_event(group->overflow_event);
   65  fsnotify_put_group(group);
   69  * Get reference to a group.
   71  void fsnotify_get_group(struct fsnotify_group *group)
   73  atomic_inc(&group->refcnt);
   77  * Drop a reference to a group. Free it if it's through.
   79  void fsnotify_put_group(struct fsnotify_group *group)
   81  if (atomic_dec_and_test(&group->refcnt))
   82  fsnotify_final_destroy_group(group);
   86  * Create a new fsnotify_group and hold a reference for the group returned.
   90  struct fsnotify_group *group;  /* in fsnotify_alloc_group() */
   92  group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
   93  if (!group)
   96  /* set to 0 when there are no external references to this group */
   97  atomic_set(&group->refcnt, 1);
   98  atomic_set(&group->num_marks, 0);
  100  mutex_init(&group->notification_mutex);
  101  INIT_LIST_HEAD(&group->notification_list);
  102  init_waitqueue_head(&group->notification_waitq);
  103  group->max_events = UINT_MAX;
  105  mutex_init(&group->mark_mutex);
  106  INIT_LIST_HEAD(&group->marks_list);
  108  group->ops = ops;
  110  return group;
  115  struct fsnotify_group *group = file->private_data;  /* in fsnotify_fasync() */
  117  return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO;
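For orientation, a hedged sketch of the caller side of this API: a backend supplies an fsnotify_ops, allocates a group (which returns holding one reference), and eventually calls fsnotify_destroy_group(), which drops that reference. The example_* names are invented; the handle_event signature follows the 4.1 fsnotify_backend.h as far as I can tell.

#include <linux/fsnotify_backend.h>
#include <linux/err.h>

static int example_handle_event(struct fsnotify_group *group,
				struct inode *inode,
				struct fsnotify_mark *inode_mark,
				struct fsnotify_mark *vfsmount_mark,
				u32 mask, void *data, int data_type,
				const unsigned char *file_name, u32 cookie)
{
	return 0;	/* consume the event, report no error */
}

static const struct fsnotify_ops example_ops = {
	.handle_event = example_handle_event,
};

static int example_backend_init(void)
{
	struct fsnotify_group *group = fsnotify_alloc_group(&example_ops);

	if (IS_ERR(group))
		return PTR_ERR(group);
	/* ... add marks, let events flow ... */
	fsnotify_destroy_group(group);	/* drops the allocation reference */
	return 0;
}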
notification.c

   20  * Basic idea behind the notification queue: An fsnotify group (like inotify)
   23  * event to the group notify queue. Since a single event might need to be on
   24  * multiple group's notification queues we can't add the event directly to each
   30  * another group a new event_holder (from fsnotify_event_holder_cachep) will be
   64  bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
   66  BUG_ON(!mutex_is_locked(&group->notification_mutex));
   67  return list_empty(&group->notification_list) ? true : false;
   70  void fsnotify_destroy_event(struct fsnotify_group *group,
   73  /* Overflow events are per-group and we don't want to free them */
   78  group->ops->free_event(event);
   82  * Add an event to the group notification queue. The group can later pull this
   87  int fsnotify_add_event(struct fsnotify_group *group,
   93  struct list_head *list = &group->notification_list;
   95  pr_debug("%s: group=%p event=%p\n", __func__, group, event);
   97  mutex_lock(&group->notification_mutex);
   99  if (group->q_len >= group->max_events) {
  102  if (!list_empty(&group->overflow_event->list)) {
  103  mutex_unlock(&group->notification_mutex);
  106  event = group->overflow_event;
  113  mutex_unlock(&group->notification_mutex);
  119  group->q_len++;
  121  mutex_unlock(&group->notification_mutex);
  123  wake_up(&group->notification_waitq);
  124  kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
  129  * Remove @event from group's notification queue. It is the responsibility of
  132  void fsnotify_remove_event(struct fsnotify_group *group,
  135  mutex_lock(&group->notification_mutex);
  138  group->q_len--;
  140  mutex_unlock(&group->notification_mutex);
  147  struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
  151  BUG_ON(!mutex_is_locked(&group->notification_mutex));
  153  pr_debug("%s: group=%p\n", __func__, group);
  155  event = list_first_entry(&group->notification_list,
  162  group->q_len--;
  171  struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
  173  BUG_ON(!mutex_is_locked(&group->notification_mutex));
  175  return list_first_entry(&group->notification_list,
  180  * Called when a group is being torn down to clean up any outstanding
  183  void fsnotify_flush_notify(struct fsnotify_group *group)
  187  mutex_lock(&group->notification_mutex);
  188  while (!fsnotify_notify_queue_is_empty(group)) {
  189  event = fsnotify_remove_first_event(group);
  190  fsnotify_destroy_event(group, event);
  192  mutex_unlock(&group->notification_mutex);
  197  * group's handle_event function if the group was interested in this
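The file above implements the producer side plus helpers; the consumer mirrors it. A hedged sketch (example name invented) of how a reader such as inotify dequeues one event, respecting the locking contract asserted by the BUG_ONs above:

static struct fsnotify_event *
example_get_one_event(struct fsnotify_group *group)
{
	struct fsnotify_event *event = NULL;

	/* The queue helpers insist notification_mutex is held. */
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		event = fsnotify_remove_first_event(group);
	mutex_unlock(&group->notification_mutex);

	/* Caller processes the event, then fsnotify_destroy_event(). */
	return event;
}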
mark.c

   23  * The group->refcnt and mark->refcnt tell how many "things" in the kernel
   26  * the reference a group and a mark hold to each other.
   34  * group->mark_mutex
   38  * group->mark_mutex protects the marks_list anchored inside a given group and
   40  * data (i.e. group limits).
   43  * Furthermore it protects the access to a reference of the group that the mark
   70  * concurrent destroy_group by getting a ref to the marks group and taking the
   73  * Very similarly for freeing by group, except we use free_g_list.
  107  if (mark->group)  /* in fsnotify_put_mark() */
  108  fsnotify_put_group(mark->group);
  129  fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group)
  134  BUG_ON(!mutex_is_locked(&group->mark_mutex));
  161  mutex_unlock(&group->mark_mutex);
  169  * may have actually freed it, unless this group provides a 'freeing_mark'
  175  * callback to the group function to let it know that this mark
  178  if (group->ops->freeing_mark)
  179  group->ops->freeing_mark(mark, group);
  193  atomic_dec(&group->num_marks);
  195  mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
  198  fsnotify_destroy_mark(struct fsnotify_mark *mark, struct fsnotify_group *group)
  201  mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
  202  fsnotify_destroy_mark_locked(mark, group);
  203  mutex_unlock(&group->mark_mutex);
  213  struct fsnotify_group *group;  /* in fsnotify_destroy_marks() */
  217  fsnotify_get_group(mark->group);
  218  group = mark->group;
  221  fsnotify_destroy_mark(mark, group);
  223  fsnotify_put_group(group);
  248  * notification group). Events shall be passed to notification groups in
  254  * and vfsmount marks of each group together. Using the group address as
  297  if ((lmark->group == mark->group) && !allow_dups)
  300  cmp = fsnotify_compare_groups(lmark->group, mark->group);
  314  * Attach an initialized mark to a given group and fs object.
  316  * event types should be delivered to which group.
  318  fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group,
                             struct inode *inode, struct vfsmount *mnt, int allow_dups)
  326  BUG_ON(!mutex_is_locked(&group->mark_mutex));
  330  * group->mark_mutex
  337  fsnotify_get_group(group);
  338  mark->group = group;
  339  list_add(&mark->g_list, &group->marks_list);
  340  atomic_inc(&group->num_marks);
  344  ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
  348  ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
  366  fsnotify_put_group(group);
  367  mark->group = NULL;
  368  atomic_dec(&group->num_marks);
  380  int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
  384  mutex_lock(&group->mark_mutex);
  385  ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups);
  386  mutex_unlock(&group->mark_mutex);
  391  * Given a list of marks, find the mark associated with given group. If found
  394  fsnotify_find_mark(struct hlist_head *head, struct fsnotify_group *group)
  400  if (mark->group == group) {
  409  * clear any marks in a group in which mark->flags & flags is true
  411  void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
  426  mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
  427  list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
  431  mutex_unlock(&group->mark_mutex);
  434  mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
  436  mutex_unlock(&group->mark_mutex);
  441  fsnotify_destroy_mark_locked(mark, group);
  442  mutex_unlock(&group->mark_mutex);
  448  * Given a group, destroy all of the marks associated with that group.
  450  void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
  452  fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
  460  if (old->group)  /* in fsnotify_duplicate_mark() */
  461  fsnotify_get_group(old->group);
  462  new->group = old->group;
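A hedged sketch of attaching a mark through this API (example_* names invented). fsnotify_init_mark() takes the function used to free the mark once its refcount drops to zero; fsnotify_add_mark() consumes nothing on failure, so the caller drops its own reference:

#include <linux/fsnotify_backend.h>
#include <linux/slab.h>

static void example_free_mark(struct fsnotify_mark *mark)
{
	kfree(mark);
}

static int example_watch_inode(struct fsnotify_group *group,
			       struct inode *inode)
{
	struct fsnotify_mark *mark = kzalloc(sizeof(*mark), GFP_KERNEL);
	int ret;

	if (!mark)
		return -ENOMEM;

	fsnotify_init_mark(mark, example_free_mark);
	mark->mask = FS_MODIFY | FS_DELETE_SELF;

	/* inode mark, no vfsmount, duplicates not allowed */
	ret = fsnotify_add_mark(mark, group, inode, NULL, 0);
	if (ret)
		fsnotify_put_mark(mark);
	return ret;
}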
fdinfo.c

   27  struct fsnotify_group *group = f->private_data;  /* in show_fdinfo() */
   30  mutex_lock(&group->mark_mutex);
   31  list_for_each_entry(mark, &group->marks_list, g_list) {
   36  mutex_unlock(&group->mark_mutex);
  134  struct fsnotify_group *group = f->private_data;  /* in fanotify_show_fdinfo() */
  137  switch (group->priority) {
  149  if (group->max_events == UINT_MAX)
  152  if (group->fanotify_data.max_marks == UINT_MAX)
  156  flags, group->fanotify_data.f_flags);
vfsmount_mark.c

   51  void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
   53  fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_VFSMOUNT);
   74  BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));  /* in fsnotify_destroy_vfsmount_mark() */
   87  * given a group and vfsmount, find the mark associated with that combination.
   90  struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group,
   97  mark = fsnotify_find_mark(&m->mnt_fsnotify_marks, group);
  104  * Attach an initialized mark to a given group and vfsmount.
  108  fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
                               struct vfsmount *mnt, int allow_dups)
  117  BUG_ON(!mutex_is_locked(&group->mark_mutex));
fsnotify.h

   10  extern void fsnotify_flush_notify(struct fsnotify_group *group);
   30  struct fsnotify_group *group, struct inode *inode,
   34  struct fsnotify_group *group, struct vfsmount *mnt,
   43  /* Find mark belonging to given group in the list of marks */
   45  struct fsnotify_group *group);
inode_mark.c

   50  BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));  /* in fsnotify_destroy_inode_mark() */
   88  * Given a group clear all of the inode marks associated with that group.
   90  void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
   92  fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
   96  * given a group and inode, find the mark associated with that combination.
   99  struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
  105  mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
  139  * event types should be delivered to which group and for which inodes. These
  141  * the group's location in memory.
  143  fsnotify_add_inode_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
                            struct inode *inode, int allow_dups)
  151  BUG_ON(!mutex_is_locked(&group->mark_mutex));
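A hedged sketch of the reverse of the earlier watch example, using the lookup helper above (the example_* name is invented). fsnotify_find_inode_mark() returns with a reference held, which the final put drops:

static int example_unwatch_inode(struct fsnotify_group *group,
				 struct inode *inode)
{
	struct fsnotify_mark *mark;

	mark = fsnotify_find_inode_mark(group, inode);
	if (!mark)
		return -ENOENT;

	fsnotify_destroy_mark(mark, group);
	fsnotify_put_mark(mark);	/* drop the find reference */
	return 0;
}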
fsnotify.c

  133  struct fsnotify_group *group = NULL;  /* in send_to_group() */
  154  group = inode_mark->group;
  163  group = vfsmount_mark->group;
  170  pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
  173  __func__, group, to_tell, mask, inode_mark,
  180  return group->ops->handle_event(group, to_tell, inode_mark,
  246  inode_group = inode_mark->group;  /* in fsnotify() */
  252  vfsmount_group = vfsmount_mark->group;
/linux-4.1.27/drivers/infiniband/hw/mlx4/
mcg.c

   50  #define mcg_warn_group(group, format, arg...) \
   52  (group)->name, group->demux->port, ## arg)
   54  #define mcg_error_group(group, format, arg...) \
   55  pr_err(" %16s: " format, (group)->name, ## arg)
  132  struct mcast_group *group;  /* member of struct mcast_req */
  140  mcg_warn_group(group, "did not expect to reach zero\n"); \
  162  struct mcast_group *group;  /* in mcast_find() */
  166  group = rb_entry(node, struct mcast_group, node);
  167  ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
  169  return group;
  179  mcast_insert(struct mlx4_ib_demux_ctx *ctx, struct mcast_group *group)
  191  ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
  192  sizeof group->rec.mgid);
  200  rb_link_node(&group->node, parent, link);
  201  rb_insert_color(&group->node, &ctx->mcg_table);
  248  static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
  258  sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];
  261  mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
  262  group->last_req_tid = mad.mad_hdr.tid;  /* keep it for later validation */
  264  ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
  268  queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
  275  static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
  288  mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
  289  group->last_req_tid = mad.mad_hdr.tid;  /* keep it for later validation */
  297  *sa_data = group->rec;
  300  ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
  302  group->state = MCAST_IDLE;
  307  queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
  314  static int send_reply_to_slave(int slave, struct mcast_group *group,
  337  *sa_data = group->rec;
  341  sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
  344  ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
  382  /* src is group record, dst is request record */  /* in cmp_rec() */
  431  /* release group, return 1 if this was last release and group is destroyed
  433  static int release_group(struct mcast_group *group, int from_timeout_handler)
  435  struct mlx4_ib_demux_ctx *ctx = group->demux;
  439  mutex_lock(&group->lock);
  440  if (atomic_dec_and_test(&group->refcount)) {
  442  if (group->state != MCAST_IDLE &&
  443  !cancel_delayed_work(&group->timeout_work)) {
  444  atomic_inc(&group->refcount);
  445  mutex_unlock(&group->lock);
  451  nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
  453  del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
  454  if (!list_empty(&group->pending_list))
  455  mcg_warn_group(group, "releasing a group with non empty pending list\n");
  457  rb_erase(&group->node, &ctx->mcg_table);
  458  list_del_init(&group->mgid0_list);
  459  mutex_unlock(&group->lock);
  461  kfree(group);
  464  mutex_unlock(&group->lock);
  470  static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
  476  group->members[i] += inc;
  479  static u8 get_leave_state(struct mcast_group *group)
  485  if (!group->members[i])
  488  return leave_state & (group->rec.scope_join_state & 7);
  491  static int join_group(struct mcast_group *group, int slave, u8 join_mask)
  497  join_state = join_mask & (~group->func[slave].join_state);
  498  adjust_membership(group, join_state, 1);
  499  group->func[slave].join_state |= join_state;
  500  if (group->func[slave].state != MCAST_MEMBER && join_state) {
  501  group->func[slave].state = MCAST_MEMBER;
  507  static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
  511  adjust_membership(group, leave_state, -1);
  512  group->func[slave].join_state &= ~leave_state;
  513  if (!group->func[slave].join_state) {
  514  group->func[slave].state = MCAST_NOT_MEMBER;
  520  static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
  522  if (group->func[slave].state != MCAST_MEMBER)
  526  if (~group->func[slave].join_state & leave_mask)
  538  struct mcast_group *group;  /* in mlx4_ib_mcg_timeout_handler() */
  541  group = container_of(delay, typeof(*group), timeout_work);
  543  mutex_lock(&group->lock);
  544  if (group->state == MCAST_JOIN_SENT) {
  545  if (!list_empty(&group->pending_list)) {
  546  req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
  549  --group->func[req->func].num_pend_reqs;
  550  mutex_unlock(&group->lock);
  552  if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
  553  if (release_group(group, 1))
  556  kfree(group);
  559  mutex_lock(&group->lock);
  561  mcg_warn_group(group, "DRIVER BUG\n");
  562  } else if (group->state == MCAST_LEAVE_SENT) {
  563  if (group->rec.scope_join_state & 7)
  564  group->rec.scope_join_state &= 0xf8;
  565  group->state = MCAST_IDLE;
  566  mutex_unlock(&group->lock);
  567  if (release_group(group, 1))
  569  mutex_lock(&group->lock);
  571  mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
  572  group->state = MCAST_IDLE;
  573  atomic_inc(&group->refcount);
  574  if (!queue_work(group->demux->mcg_wq, &group->work))
  575  safe_atomic_dec(&group->refcount);
  577  mutex_unlock(&group->lock);
  580  static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
  586  leave_mask = group->func[req->func].join_state;
  588  status = check_leave(group, req->func, leave_mask);
  590  leave_group(group, req->func, leave_mask);
  593  send_reply_to_slave(req->func, group, &req->sa_mad, status);
  594  --group->func[req->func].num_pend_reqs;
  601  static int handle_join_req(struct mcast_group *group, u8 join_mask,
  604  u8 group_join_state = group->rec.scope_join_state & 7;
  611  status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
  613  join_group(group, req->func, join_mask);
  615  --group->func[req->func].num_pend_reqs;
  616  send_reply_to_slave(req->func, group, &req->sa_mad, status);
  623  group->prev_state = group->state;
  624  if (send_join_to_wire(group, &req->sa_mad)) {
  625  --group->func[req->func].num_pend_reqs;
  630  group->state = group->prev_state;
  632  group->state = MCAST_JOIN_SENT;
  640  struct mcast_group *group;  /* in mlx4_ib_mcg_work_handler() */
  648  group = container_of(work, typeof(*group), work);
  650  mutex_lock(&group->lock);
  652  /* First, let's see if a response from SM is waiting regarding this group.
  653  * If so, we need to update the group's REC. If this is a bad response, we
  656  if (group->state == MCAST_RESP_READY) {
  658  cancel_delayed_work(&group->timeout_work);
  659  status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
  660  method = group->response_sa_mad.mad_hdr.method;
  661  if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
  662  mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
  663  be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
  664  be64_to_cpu(group->last_req_tid));
  665  group->state = group->prev_state;
  669  if (!list_empty(&group->pending_list))
  670  req = list_first_entry(&group->pending_list,
  674  send_reply_to_slave(req->func, group, &req->sa_mad, status);
  675  --group->func[req->func].num_pend_reqs;
  681  mcg_warn_group(group, "no request for failed join\n");
  682  } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
  689  group->response_sa_mad.data)->scope_join_state & 7;
  690  cur_join_state = group->rec.scope_join_state & 7;
  698  memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
  700  group->state = MCAST_IDLE;
  705  while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
  706  req = list_first_entry(&group->pending_list, struct mcast_req,
  715  rc += handle_leave_req(group, req_join_state, req);
  717  rc += handle_join_req(group, req_join_state, req);
  721  if (group->state == MCAST_IDLE) {
  722  req_join_state = get_leave_state(group);
  724  group->rec.scope_join_state &= ~req_join_state;
  725  group->prev_state = group->state;
  726  if (send_leave_to_wire(group, req_join_state)) {
  727  group->state = group->prev_state;
  730  group->state = MCAST_LEAVE_SENT;
  734  if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
  736  mutex_unlock(&group->lock);
  739  release_group(group, 0);
  746  struct mcast_group *group = NULL, *cur_group;  /* in search_relocate_mgid0_group() */
  753  group = list_entry(pos, struct mcast_group, mgid0_list);
  754  mutex_lock(&group->lock);
  755  if (group->last_req_tid == tid) {
  757  group->rec.mgid = *new_mgid;
  758  sprintf(group->name, "%016llx%016llx",
  759  be64_to_cpu(group->rec.mgid.global.subnet_prefix),
  760  be64_to_cpu(group->rec.mgid.global.interface_id));
  761  list_del_init(&group->mgid0_list);
  762  cur_group = mcast_insert(ctx, group);
  765  req = list_first_entry(&group->pending_list,
  767  --group->func[req->func].num_pend_reqs;
  771  mutex_unlock(&group->lock);
  773  release_group(group, 0);
  777  atomic_inc(&group->refcount);
  778  add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
  779  mutex_unlock(&group->lock);
  781  return group;
  785  list_del(&group->mgid0_list);
  786  if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
  787  cancel_delayed_work_sync(&group->timeout_work);
  789  list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
  793  mutex_unlock(&group->lock);
  795  kfree(group);
  799  mutex_unlock(&group->lock);
  813  struct mcast_group *group, *cur_group;  /* in acquire_group() */
  819  group = mcast_find(ctx, mgid);
  820  if (group)
  827  group = kzalloc(sizeof *group, gfp_mask);
  828  if (!group)
  831  group->demux = ctx;
  832  group->rec.mgid = *mgid;
  833  INIT_LIST_HEAD(&group->pending_list);
  834  INIT_LIST_HEAD(&group->mgid0_list);
  836  INIT_LIST_HEAD(&group->func[i].pending);
  837  INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
  838  INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
  839  mutex_init(&group->lock);
  840  sprintf(group->name, "%016llx%016llx",
  841  be64_to_cpu(group->rec.mgid.global.subnet_prefix),
  842  be64_to_cpu(group->rec.mgid.global.interface_id));
  843  sysfs_attr_init(&group->dentry.attr);
  844  group->dentry.show = sysfs_show_group;
  845  group->dentry.store = NULL;
  846  group->dentry.attr.name = group->name;
  847  group->dentry.attr.mode = 0400;
  848  group->state = MCAST_IDLE;
  851  list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
  855  cur_group = mcast_insert(ctx, group);
  857  mcg_warn("group just showed up %s - confused\n", cur_group->name);
  858  kfree(group);
  862  add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
  865  atomic_inc(&group->refcount);
  866  return group;
  871  struct mcast_group *group = req->group;  /* in queue_req() */
  873  atomic_inc(&group->refcount);  /* for the request */
  874  atomic_inc(&group->refcount);  /* for scheduling the work */
  875  list_add_tail(&req->group_list, &group->pending_list);
  876  list_add_tail(&req->func_list, &group->func[req->func].pending);
  878  if (!queue_work(group->demux->mcg_wq, &group->work))
  879  safe_atomic_dec(&group->refcount);
  888  struct mcast_group *group;  /* in mlx4_ib_mcg_demux_handler() */
  894  group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
  896  if (IS_ERR(group)) {
  899  *(u8 *)(&tid) = (u8)slave;  /* in group we kept the modified TID */
  900  group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
  902  group = NULL;
  905  if (!group)
  908  mutex_lock(&group->lock);
  909  group->response_sa_mad = *mad;
  910  group->prev_state = group->state;
  911  group->state = MCAST_RESP_READY;
  913  atomic_inc(&group->refcount);
  914  if (!queue_work(ctx->mcg_wq, &group->work))
  915  safe_atomic_dec(&group->refcount);
  916  mutex_unlock(&group->lock);
  917  release_group(group, 0);
  937  struct mcast_group *group;  /* in mlx4_ib_mcg_multiplex_handler() */
  956  group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
  958  if (IS_ERR(group)) {
  960  return PTR_ERR(group);
  962  mutex_lock(&group->lock);
  963  if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
  964  mutex_unlock(&group->lock);
  965  mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
  967  release_group(group, 0);
  971  ++group->func[slave].num_pend_reqs;
  972  req->group = group;
  974  mutex_unlock(&group->lock);
  975  release_group(group, 0);
  992  struct mcast_group *group =  /* in sysfs_show_group() */
 1000  if (group->state == MCAST_IDLE)
 1001  sprintf(state_str, "%s", get_state_string(group->state));
 1004  get_state_string(group->state),
 1005  be64_to_cpu(group->last_req_tid));
 1006  if (list_empty(&group->pending_list)) {
 1009  req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
 1014  group->rec.scope_join_state & 0xf,
 1015  group->members[2], group->members[1], group->members[0],
 1016  atomic_read(&group->refcount),
 1020  if (group->func[f].state == MCAST_MEMBER)
 1022  f, group->func[f].join_state);
 1026  be16_to_cpu(group->rec.pkey),
 1027  be32_to_cpu(group->rec.qkey),
 1028  (group->rec.mtusel_mtu & 0xc0) >> 6,
 1029  group->rec.mtusel_mtu & 0x3f,
 1030  group->rec.tclass,
 1031  (group->rec.ratesel_rate & 0xc0) >> 6,
 1032  group->rec.ratesel_rate & 0x3f,
 1033  (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
 1034  (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
 1035  be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
 1036  group->rec.proxy_join);
 1059  static void force_clean_group(struct mcast_group *group)
 1063  list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
 1067  del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
 1068  rb_erase(&group->node, &group->demux->mcg_table);
 1069  kfree(group);
 1076  struct mcast_group *group;  /* in _mlx4_ib_mcg_port_cleanup() */
 1102  group = rb_entry(p, struct mcast_group, node);
 1103  if (atomic_read(&group->refcount))
 1104  mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
 1106  force_clean_group(group);
 1162  static void clear_pending_reqs(struct mcast_group *group, int vf)
 1168  if (!list_empty(&group->pending_list))
 1169  group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);
 1171  list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
 1174  (group->state == MCAST_JOIN_SENT ||
 1175  group->state == MCAST_LEAVE_SENT)) {
 1176  clear = cancel_delayed_work(&group->timeout_work);
 1178  group->state = MCAST_IDLE;
 1181  --group->func[vf].num_pend_reqs;
 1185  atomic_dec(&group->refcount);
 1189  if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
 1190  mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
 1191  list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
 1195  static int push_deleteing_req(struct mcast_group *group, int slave)
 1200  if (!group->func[slave].join_state)
 1205  mcg_warn_group(group, "failed allocation - may leave stall groups\n");
 1209  if (!list_empty(&group->func[slave].pending)) {
 1210  pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
 1219  req->group = group;
 1220  ++group->func[slave].num_pend_reqs;
 1228  struct mcast_group *group;  /* in clean_vf_mcast() */
 1233  group = rb_entry(p, struct mcast_group, node);
 1234  mutex_lock(&group->lock);
 1235  if (atomic_read(&group->refcount)) {
 1237  clear_pending_reqs(group, slave);
 1238  push_deleteing_req(group, slave);
 1240  mutex_unlock(&group->lock);
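The trickiest part of this file is release_group() (lines 431-464): dropping the last reference races with the pending timeout work, so the code resurrects the reference when the delayed work cannot be cancelled and lets the timeout handler perform the final drop. A hedged, generic sketch of that pattern (all names invented; only the kernel primitives are real):

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_obj {
	atomic_t refcount;
	struct mutex lock;
	struct delayed_work timeout_work;
	bool busy;	/* a request is outstanding, timeout may be queued */
};

/* Returns 1 if this call freed the object. */
static int example_put(struct example_obj *obj, bool from_timeout)
{
	mutex_lock(&obj->lock);
	if (atomic_dec_and_test(&obj->refcount)) {
		/*
		 * If a timeout may still run and we could not cancel it,
		 * hand our reference back and let the timeout handler
		 * (which calls example_put(obj, true)) free the object.
		 */
		if (obj->busy && !cancel_delayed_work(&obj->timeout_work) &&
		    !from_timeout) {
			atomic_inc(&obj->refcount);
			mutex_unlock(&obj->lock);
			return 0;
		}
		mutex_unlock(&obj->lock);
		kfree(obj);
		return 1;
	}
	mutex_unlock(&obj->lock);
	return 0;
}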
/linux-4.1.27/drivers/infiniband/core/
multicast.c

  118  struct mcast_group *group;  /* member of struct mcast_member */
  134  struct mcast_group *group;  /* in mcast_find() */
  138  group = rb_entry(node, struct mcast_group, node);
  139  ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
  141  return group;
  151  mcast_insert(struct mcast_port *port, struct mcast_group *group, int allow_duplicates)
  164  ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
  165  sizeof group->rec.mgid);
  175  rb_link_node(&group->node, parent, link);
  176  rb_insert_color(&group->node, &port->table);
  186  static void release_group(struct mcast_group *group)
  188  struct mcast_port *port = group->port;
  192  if (atomic_dec_and_test(&group->refcount)) {
  193  rb_erase(&group->node, &port->table);
  195  kfree(group);
  209  struct mcast_group *group = member->group;  /* in queue_join() */
  212  spin_lock_irqsave(&group->lock, flags);
  213  list_add_tail(&member->list, &group->pending_list);
  214  if (group->state == MCAST_IDLE) {
  215  group->state = MCAST_BUSY;
  216  atomic_inc(&group->refcount);
  217  queue_work(mcast_wq, &group->work);
  219  spin_unlock_irqrestore(&group->lock, flags);
  223  * A multicast group has three types of members: full member, non member, and
  228  static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
  234  group->members[i] += inc;
  238  * If a multicast group has zero members left for a particular join state, but
  239  * the group is still a member with the SA, we need to leave that join state.
  243  static u8 get_leave_state(struct mcast_group *group)
  249  if (!group->members[i])
  252  return leave_state & group->rec.join_state;
  330  static int send_join(struct mcast_group *group, struct mcast_member *member)
  332  struct mcast_port *port = group->port;
  335  group->last_join = member;
  340  3000, GFP_KERNEL, join_handler, group,
  341  &group->query);
  343  group->query_id = ret;
  349  static int send_leave(struct mcast_group *group, u8 leave_state)
  351  struct mcast_port *port = group->port;
  355  rec = group->rec;
  357  group->leave_state = leave_state;
  365  group, &group->query);
  367  group->query_id = ret;
  373  static void join_group(struct mcast_group *group, struct mcast_member *member,
  377  adjust_membership(group, join_state, 1);
  378  group->rec.join_state |= join_state;
  379  member->multicast.rec = group->rec;
  381  list_move(&member->list, &group->active_list);
  384  static int fail_join(struct mcast_group *group, struct mcast_member *member,
  387  spin_lock_irq(&group->lock);
  389  spin_unlock_irq(&group->lock);
  393  static void process_group_error(struct mcast_group *group)
  399  if (group->state == MCAST_PKEY_EVENT)
  400  ret = ib_find_pkey(group->port->dev->device,
  401  group->port->port_num,
  402  be16_to_cpu(group->rec.pkey), &pkey_index);
  404  spin_lock_irq(&group->lock);
  405  if (group->state == MCAST_PKEY_EVENT && !ret &&
  406  group->pkey_index == pkey_index)
  409  while (!list_empty(&group->active_list)) {
  410  member = list_entry(group->active_list.next,
  414  adjust_membership(group, member->multicast.rec.join_state, -1);
  416  spin_unlock_irq(&group->lock);
  423  spin_lock_irq(&group->lock);
  426  group->rec.join_state = 0;
  428  group->state = MCAST_BUSY;
  429  spin_unlock_irq(&group->lock);
  434  struct mcast_group *group;  /* in mcast_work_handler() */
  440  group = container_of(work, typeof(*group), work);
  442  spin_lock_irq(&group->lock);
  443  while (!list_empty(&group->pending_list) ||
  444  (group->state != MCAST_BUSY)) {
  446  if (group->state != MCAST_BUSY) {
  447  spin_unlock_irq(&group->lock);
  448  process_group_error(group);
  452  member = list_entry(group->pending_list.next,
  458  if (join_state == (group->rec.join_state & join_state)) {
  459  status = cmp_rec(&group->rec, &multicast->rec,
  462  join_group(group, member, join_state);
  465  spin_unlock_irq(&group->lock);
  468  spin_unlock_irq(&group->lock);
  469  status = send_join(group, member);
  474  ret = fail_join(group, member, status);
  480  spin_lock_irq(&group->lock);
  483  join_state = get_leave_state(group);
  485  group->rec.join_state &= ~join_state;
  486  spin_unlock_irq(&group->lock);
  487  if (send_leave(group, join_state))
  490  group->state = MCAST_IDLE;
  491  spin_unlock_irq(&group->lock);
  492  release_group(group);
  499  static void process_join_error(struct mcast_group *group, int status)
  504  spin_lock_irq(&group->lock);
  505  member = list_entry(group->pending_list.next,
  507  if (group->last_join == member) {
  510  spin_unlock_irq(&group->lock);
  516  spin_unlock_irq(&group->lock);
  522  struct mcast_group *group = context;  /* in join_handler() */
  526  process_join_error(group, status);
  529  ib_find_pkey(group->port->dev->device, group->port->port_num,
  532  spin_lock_irq(&group->port->lock);
  533  if (group->state == MCAST_BUSY &&
  534  group->pkey_index == MCAST_INVALID_PKEY_INDEX)
  535  group->pkey_index = pkey_index;
  536  mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
  537  sizeof(group->rec.mgid));
  538  group->rec = *rec;
  540  rb_erase(&group->node, &group->port->table);
  541  is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
  543  mcast_insert(group->port, group, is_mgid0);
  545  spin_unlock_irq(&group->port->lock);
  547  mcast_work_handler(&group->work);
  553  struct mcast_group *group = context;  /* in leave_handler() */
  555  if (status && group->retries > 0 &&
  556  !send_leave(group, group->leave_state))
  557  group->retries--;
  559  mcast_work_handler(&group->work);
  565  struct mcast_group *group, *cur_group;  /* in acquire_group() */
  572  group = mcast_find(port, mgid);
  573  if (group)
  578  group = kzalloc(sizeof *group, gfp_mask);
  579  if (!group)
  582  group->retries = 3;
  583  group->port = port;
  584  group->rec.mgid = *mgid;
  585  group->pkey_index = MCAST_INVALID_PKEY_INDEX;
  586  INIT_LIST_HEAD(&group->pending_list);
  587  INIT_LIST_HEAD(&group->active_list);
  588  INIT_WORK(&group->work, mcast_work_handler);
  589  spin_lock_init(&group->lock);
  592  cur_group = mcast_insert(port, group, is_mgid0);
  594  kfree(group);
  595  group = cur_group;
  599  atomic_inc(&group->refcount);
  601  return group;
  605  * We serialize all join requests to a single group to make our lives much
  606  * easier. Otherwise, two users could try to join the same group
  643  member->group = acquire_group(&dev->port[port_num - dev->start_port],  /* in ib_sa_join_multicast() */
  645  if (!member->group) {
  670  struct mcast_group *group;  /* in ib_sa_free_multicast() */
  673  group = member->group;
  675  spin_lock_irq(&group->lock);
  677  adjust_membership(group, multicast->rec.join_state, -1);
  681  if (group->state == MCAST_IDLE) {
  682  group->state = MCAST_BUSY;
  683  spin_unlock_irq(&group->lock);
  684  /* Continue to hold reference on group until callback */
  685  queue_work(mcast_wq, &group->work);
  687  spin_unlock_irq(&group->lock);
  688  release_group(group);
  703  struct mcast_group *group;  /* in ib_sa_get_mcmember_rec() */
  713  group = mcast_find(port, mgid);
  714  if (group)
  715  *rec = group->rec;
  757  struct mcast_group *group;  /* in mcast_groups_event() */
  763  group = rb_entry(node, struct mcast_group, node);
  764  spin_lock(&group->lock);
  765  if (group->state == MCAST_IDLE) {
  766  atomic_inc(&group->refcount);
  767  queue_work(mcast_wq, &group->work);
  769  if (group->state != MCAST_GROUP_ERROR)
  770  group->state = state;
  771  spin_unlock(&group->lock);
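For the consumer's view of this machinery, here is a hedged sketch of joining a group through ib_sa_join_multicast(), modelled loosely on how ipoib uses it. The example_* names are invented; the comp_mask flags and record fields are the ones declared in rdma/ib_sa.h as far as I can tell, and the call returns an ERR_PTR on failure.

#include <rdma/ib_sa.h>

static int example_join_done(int status, struct ib_sa_multicast *multicast)
{
	if (status)
		pr_warn("multicast join failed: %d\n", status);
	/* A non-zero return tells the core to abandon the join. */
	return status;
}

static struct ib_sa_multicast *
example_join(struct ib_sa_client *client, struct ib_device *device,
	     u8 port_num, union ib_gid *mgid, union ib_gid *port_gid,
	     u16 pkey)
{
	struct ib_sa_mcmember_rec rec = {
		.mgid       = *mgid,
		.port_gid   = *port_gid,
		.pkey       = cpu_to_be16(pkey),
		.join_state = 1,	/* full member */
	};

	return ib_sa_join_multicast(client, device, port_num, &rec,
				    IB_SA_MCMEMBER_REC_MGID |
				    IB_SA_MCMEMBER_REC_PORT_GID |
				    IB_SA_MCMEMBER_REC_PKEY |
				    IB_SA_MCMEMBER_REC_JOIN_STATE,
				    GFP_KERNEL, example_join_done, NULL);
}

The matching teardown is ib_sa_free_multicast() on the returned handle, which (per lines 670-688 above) adjusts membership counts and may schedule a leave toward the SA.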
/linux-4.1.27/fs/sysfs/
Makefile

    5  obj-y := file.o dir.o symlink.o mount.o group.o
group.c

    2  * fs/sysfs/group.c - Operations for adding/removing multiple files at once.
  106  WARN(1, "sysfs: (bin_)attrs not set by subsystem for group: %s/%s\n",  /* in internal_create_group() */
  131  * sysfs_create_group - given a directory kobject, create an attribute group
  132  * @kobj: The kobject to create the group on
  133  * @grp: The attribute group to create
  135  * This function creates a group for the first time. It will explicitly
  149  * @kobj: The kobject to create the group on
  153  * creating a group, all previously created groups will be removed, unwinding
  182  * sysfs_update_group - given a directory kobject, update an attribute group
  183  * @kobj: The kobject to update the group on
  184  * @grp: The attribute group to update
  186  * This function updates an attribute group. Unlike
  194  * that affects group visibility.
  206  * sysfs_remove_group: remove a group from a kobject
  207  * @kobj: kobject to remove the group from
  208  * @grp: group to remove
  210  * This function removes a group of attributes from a kobject. The attributes
  211  * previously have to have been created for this group, otherwise it will fail.
  223  "sysfs group %p not found for kobject '%s'\n",  /* in sysfs_remove_group() */
  261  * sysfs_merge_group - merge files into a pre-existing attribute group.
  262  * @kobj: The kobject containing the group.
  263  * @grp: The files to create and the attribute group they belong to.
  265  * This function returns an error if the group doesn't exist or any of the
  266  * files already exist in that group, in which case none of the new files
  294  * sysfs_unmerge_group - remove files from a pre-existing attribute group.
  295  * @kobj: The kobject containing the group.
  296  * @grp: The files to remove and the attribute group they belong to.
  314  * sysfs_add_link_to_group - add a symlink to an attribute group.
  315  * @kobj: The kobject containing the group.
  316  * @group_name: The name of the group.
  338  * sysfs_remove_link_from_group - remove a symlink from an attribute group.
  339  * @kobj: The kobject containing the group.
  340  * @group_name: The name of the group.
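The usual caller pattern for this API is well established: declare attributes, collect them in an attribute_group, and create or remove the whole group at once. A sketch with invented example_* names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,			/* the array must be NULL-terminated */
};

static const struct attribute_group example_group = {
	.name  = "example",	/* optional subdirectory name */
	.attrs = example_attrs,
};

/* typically called from a driver's probe()/remove(): */
static int example_sysfs_init(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &example_group);
}

static void example_sysfs_exit(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &example_group);
}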
file.c

  344  * sysfs_add_file_to_group - add an attribute file to a pre-existing group.
  347  * @group: group name.
  349  sysfs_add_file_to_group(struct kobject *kobj, const struct attribute *attr,
                            const char *group)
  355  if (group) {
  356  parent = kernfs_find_and_get(kobj->sd, group);
  449  * sysfs_remove_file_from_group - remove an attribute file from a group.
  452  * @group: group name.
  454  sysfs_remove_file_from_group(struct kobject *kobj, const struct attribute *attr,
                                 const char *group)
  459  if (group) {
  460  parent = kernfs_find_and_get(kobj->sd, group);
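These two helpers complement the group API: they add or remove a single file inside an already-created named group, which suits attributes that only exist when a feature is detected at runtime. A hedged sketch building on the "example" group from the previous sketch (names invented):

static DEVICE_ATTR(optional, 0444, example_show, NULL);

static int example_add_optional(struct device *dev)
{
	return sysfs_add_file_to_group(&dev->kobj,
				       &dev_attr_optional.attr, "example");
}

static void example_del_optional(struct device *dev)
{
	sysfs_remove_file_from_group(&dev->kobj,
				     &dev_attr_optional.attr, "example");
}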
/linux-4.1.27/drivers/clk/shmobile/
clk-mstp.c

   28  * struct mstp_clock_group - MSTP gating clocks group
   30  * @data: clocks in this group
   46  * @group: MSTP clocks group
   51  struct mstp_clock_group *group;  /* member of struct mstp_clock */
   59  struct mstp_clock_group *group = clock->group;  /* in cpg_mstp_clock_endisable() */
   65  spin_lock_irqsave(&group->lock, flags);
   67  value = clk_readl(group->smstpcr);
   72  clk_writel(value, group->smstpcr);
   74  spin_unlock_irqrestore(&group->lock, flags);
   76  if (!enable || !group->mstpsr)
   80  if (!(clk_readl(group->mstpsr) & bitmask))
   87  group->smstpcr, clock->bit_index);
  107  struct mstp_clock_group *group = clock->group;  /* in cpg_mstp_clock_is_enabled() */
  110  if (group->mstpsr)
  111  value = clk_readl(group->mstpsr);
  113  value = clk_readl(group->smstpcr);
  125  cpg_mstp_clock_register(const char *name, const char *parent_name,
                            unsigned int index, struct mstp_clock_group *group)
  145  clock->group = group;
  158  struct mstp_clock_group *group;  /* in cpg_mstp_clocks_init() */
  163  group = kzalloc(sizeof(*group), GFP_KERNEL);
  165  if (group == NULL || clks == NULL) {
  166  kfree(group);
  168  pr_err("%s: failed to allocate group\n", __func__);
  172  spin_lock_init(&group->lock);
  173  group->data.clks = clks;
  175  group->smstpcr = of_iomap(np, 0);
  176  group->mstpsr = of_iomap(np, 1);
  178  if (group->smstpcr == NULL) {
  180  kfree(group);
  217  clkidx, group);
  219  group->data.clk_num = max(group->data.clk_num,
  236  of_clk_add_provider(np, of_clk_src_onecell_get, &group->data);
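The enable path quoted above boils down to a read-modify-write of SMSTPCR under the group lock, followed by polling MSTPSR (when present) until the module reports running. A hedged sketch of just that logic, reusing struct mstp_clock_group from the file; the function name and the poll budget are invented:

#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/errno.h>

static int example_mstp_enable(struct mstp_clock_group *group,
			       unsigned int bit)
{
	u32 bitmask = BIT(bit);
	unsigned long flags;
	unsigned int i;
	u32 value;

	spin_lock_irqsave(&group->lock, flags);
	value = clk_readl(group->smstpcr);
	value &= ~bitmask;		/* 0 = clock supplied to module */
	clk_writel(value, group->smstpcr);
	spin_unlock_irqrestore(&group->lock, flags);

	if (!group->mstpsr)
		return 0;		/* no status register to poll */

	for (i = 1000; i > 0; --i) {
		if (!(clk_readl(group->mstpsr) & bitmask))
			return 0;	/* module is out of standby */
		cpu_relax();
	}
	return -ETIMEDOUT;
}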
/linux-4.1.27/drivers/iommu/
iommu.c

   64  ssize_t (*show)(struct iommu_group *group, char *buf);
   65  ssize_t (*store)(struct iommu_group *group,
   82  struct iommu_group *group = to_iommu_group(kobj);  /* in iommu_group_attr_show() */
   86  ret = attr->show(group, buf);
   95  struct iommu_group *group = to_iommu_group(kobj);  /* in iommu_group_attr_store() */
   99  ret = attr->store(group, buf, count);
  108  static int iommu_group_create_file(struct iommu_group *group,
  111  return sysfs_create_file(&group->kobj, &attr->attr);
  114  static void iommu_group_remove_file(struct iommu_group *group,
  117  sysfs_remove_file(&group->kobj, &attr->attr);
  120  static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
  122  return sprintf(buf, "%s\n", group->name);
  129  struct iommu_group *group = to_iommu_group(kobj);  /* in iommu_group_release() */
  131  if (group->iommu_data_release)
  132  group->iommu_data_release(group->iommu_data);
  135  ida_remove(&iommu_group_ida, group->id);
  138  kfree(group->name);
  139  kfree(group);
  148  * iommu_group_alloc - Allocate a new group
  149  * @name: Optional name to associate with group, visible in sysfs
  152  * group. The iommu group represents the minimum granularity of the iommu.
  154  * group in order to hold the group until devices are added. Use
  156  * group to be automatically reclaimed once it has no devices or external
  161  struct iommu_group *group;  /* in iommu_group_alloc() */
  164  group = kzalloc(sizeof(*group), GFP_KERNEL);
  165  if (!group)
  168  group->kobj.kset = iommu_group_kset;
  169  mutex_init(&group->mutex);
  170  INIT_LIST_HEAD(&group->devices);
  171  BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
  177  kfree(group);
  182  if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
  187  ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
  188  NULL, "%d", group->id);
  191  ida_remove(&iommu_group_ida, group->id);
  193  kfree(group);
  197  group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
  198  if (!group->devices_kobj) {
  199  kobject_put(&group->kobj);  /* triggers .release & free */
  204  * The devices_kobj holds a reference on the group kobject, so
  205  * as long as that exists so will the group. We can therefore
  208  kobject_put(&group->kobj);
  210  return group;
  217  struct iommu_group *group;  /* in iommu_group_get_by_id() */
  233  group = container_of(group_kobj, struct iommu_group, kobj);
  234  BUG_ON(group->id != id);
  236  kobject_get(group->devices_kobj);
  237  kobject_put(&group->kobj);
  239  return group;
  244  * iommu_group_get_iommudata - retrieve iommu_data registered for a group
  245  * @group: the group
  247  * iommu drivers can store data in the group for use when doing iommu
  249  * should hold a group reference.
  251  void *iommu_group_get_iommudata(struct iommu_group *group)
  253  return group->iommu_data;
  258  * iommu_group_set_iommudata - set iommu_data for a group
  259  * @group: the group
  263  * iommu drivers can store data in the group for use when doing iommu
  265  * the group has been allocated. Caller should hold a group reference.
  267  void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
  270  group->iommu_data = iommu_data;
  271  group->iommu_data_release = release;
  276  * iommu_group_set_name - set name for a group
  277  * @group: the group
  280  * Allow iommu driver to set a name for a group. When set it will
  281  * appear in a name attribute file under the group in sysfs.
  283  int iommu_group_set_name(struct iommu_group *group, const char *name)
  287  if (group->name) {
  288  iommu_group_remove_file(group, &iommu_group_attr_name);
  289  kfree(group->name);
  290  group->name = NULL;
  295  group->name = kstrdup(name, GFP_KERNEL);
  296  if (!group->name)
  299  ret = iommu_group_create_file(group, &iommu_group_attr_name);
  301  kfree(group->name);
  302  group->name = NULL;
  311  * iommu_group_add_device - add a device to an iommu group
  312  * @group: the group into which to add the device (reference should be held)
  316  * group. Adding a device increments the group reference count.
  318  int iommu_group_add_device(struct iommu_group *group, struct device *dev)
  329  ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
  343  ret = sysfs_create_link_nowarn(group->devices_kobj,
  362  kobject_get(group->devices_kobj);
  364  dev->iommu_group = group;
  366  mutex_lock(&group->mutex);
  367  list_add_tail(&device->list, &group->devices);
  368  mutex_unlock(&group->mutex);
  370  /* Notify any listeners about change to group. */
  371  blocking_notifier_call_chain(&group->notifier,
  374  trace_add_device_to_group(group->id, dev);
  380  * iommu_group_remove_device - remove a device from its current group
  384  * its current group. This decrements the iommu group reference count.
  388  struct iommu_group *group = dev->iommu_group;  /* in iommu_group_remove_device() */
  392  blocking_notifier_call_chain(&group->notifier,
  395  mutex_lock(&group->mutex);
  396  list_for_each_entry(tmp_device, &group->devices, list) {
  403  mutex_unlock(&group->mutex);
  408  sysfs_remove_link(group->devices_kobj, device->name);
  411  trace_remove_device_from_group(group->id, dev);
  416  kobject_put(group->devices_kobj);
  421  * iommu_group_for_each_dev - iterate over each device in the group
  422  * @group: the group
  426  * This function is called by group users to iterate over group devices.
  427  * Callers should hold a reference count to the group during callback.
  428  * The group->mutex is held across callbacks, which will block calls to
  431  int iommu_group_for_each_dev(struct iommu_group *group, void *data,
  437  mutex_lock(&group->mutex);
  438  list_for_each_entry(device, &group->devices, list) {
  443  mutex_unlock(&group->mutex);
  449  * iommu_group_get - Return the group for a device and increment reference
  450  * @dev: get the group that this device belongs to
  452  * This function is called by iommu drivers and users to get the group
  453  * for the specified device. If found, the group is returned and the group
  458  struct iommu_group *group = dev->iommu_group;  /* in iommu_group_get() */
  460  if (group)
  461  kobject_get(group->devices_kobj);
  463  return group;
  468  * iommu_group_put - Decrement group reference
  469  * @group: the group to use
  472  * iommu group. Once the reference count is zero, the group is released.
  474  void iommu_group_put(struct iommu_group *group)
  476  if (group)
  477  kobject_put(group->devices_kobj);
  482  * iommu_group_register_notifier - Register a notifier for group changes
  483  * @group: the group to watch
  486  * This function allows iommu group users to track changes in a group.
  488  * should hold a reference to the group throughout notifier registration.
  490  int iommu_group_register_notifier(struct iommu_group *group,
  493  return blocking_notifier_chain_register(&group->notifier, nb);
  499  * @group: the group to watch
  502  * Unregister a previously registered group notifier block.
  504  int iommu_group_unregister_notifier(struct iommu_group *group,
  507  return blocking_notifier_chain_unregister(&group->notifier, nb);
  512  * iommu_group_id - Return ID for a group
  513  * @group: the group to ID
  515  * Return the unique ID for the group matching the sysfs group number.
  517  int iommu_group_id(struct iommu_group *group)
  519  return group->id;
  540  * that may already have a group.
  546  struct iommu_group *group;  /* in get_pci_function_alias_group() */
  557  group = get_pci_alias_group(tmp, devfns);
  558  if (group) {
  560  return group;
  580  struct iommu_group *group;  /* in get_pci_alias_group() */
  585  group = iommu_group_get(&pdev->dev);
  586  if (group)
  587  return group;
  599  group = get_pci_alias_group(tmp, devfns);
  600  if (group) {
  602  return group;
  605  group = get_pci_function_alias_group(tmp, devfns);
  606  if (group) {
  608  return group;
  618  struct iommu_group *group;  /* member of struct group_for_pci_data */
  623  * the IOMMU group if we find one along the way.
  630  data->group = iommu_group_get(&pdev->dev);  /* in get_pci_alias_or_group() */
  632  return data->group != NULL;
  637  * to find or create an IOMMU group for a device.
  643  struct iommu_group *group = NULL;  /* in iommu_group_get_for_pci_dev() */
  648  * be aliased due to topology in order to have its own IOMMU group.
  650  * group, use it.
iommu_group_get_for_pci_dev() 653 return data.group; iommu_group_get_for_pci_dev() 661 * group, use it. iommu_group_get_for_pci_dev() 672 group = iommu_group_get(&pdev->dev); iommu_group_get_for_pci_dev() 673 if (group) iommu_group_get_for_pci_dev() 674 return group; iommu_group_get_for_pci_dev() 679 * device or another device aliases us, use the same group. iommu_group_get_for_pci_dev() 681 group = get_pci_alias_group(pdev, (unsigned long *)devfns); iommu_group_get_for_pci_dev() 682 if (group) iommu_group_get_for_pci_dev() 683 return group; iommu_group_get_for_pci_dev() 690 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); iommu_group_get_for_pci_dev() 691 if (group) iommu_group_get_for_pci_dev() 692 return group; iommu_group_get_for_pci_dev() 694 /* No shared group found, allocate new */ iommu_group_get_for_pci_dev() 699 * iommu_group_get_for_dev - Find or create the IOMMU group for a device 704 * IOMMU group for a device. On success, the caller will hold a reference 705 * to the returned IOMMU group, which will already include the provided 710 struct iommu_group *group; iommu_group_get_for_dev() local 713 group = iommu_group_get(dev); iommu_group_get_for_dev() 714 if (group) iommu_group_get_for_dev() 715 return group; iommu_group_get_for_dev() 720 group = iommu_group_get_for_pci_dev(to_pci_dev(dev)); iommu_group_get_for_dev() 722 if (IS_ERR(group)) iommu_group_get_for_dev() 723 return group; iommu_group_get_for_dev() 725 ret = iommu_group_add_device(group, dev); iommu_group_get_for_dev() 727 iommu_group_put(group); iommu_group_get_for_dev() 731 return group; iommu_group_get_for_dev() 754 struct iommu_group *group; iommu_bus_notifier() local 759 * result in ADD/DEL notifiers to group->notifier iommu_bus_notifier() 773 * group, if anyone is listening iommu_bus_notifier() 775 group = iommu_group_get(dev); iommu_bus_notifier() 776 if (!group) iommu_bus_notifier() 795 blocking_notifier_call_chain(&group->notifier, iommu_bus_notifier() 798 iommu_group_put(group); iommu_bus_notifier() 951 * iterating over the devices in a group. Ideally we'd have a single 952 * device which represents the requestor ID of the group, but we also 955 * wish to group them at a higher level (ex. untrusted multi-function 965 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) iommu_attach_group() argument 967 return iommu_group_for_each_dev(group, domain, iommu_attach_group() 981 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) iommu_detach_group() argument 983 iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device); iommu_detach_group()
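Note: iommu_group_for_each_dev() above walks group->devices with group->mutex held for the whole iteration. A hypothetical caller, sketched only against the signatures visible in this entry (it assumes the usual <linux/iommu.h> kernel context and is not a standalone program), might count the devices in a group like this:

/* Hypothetical helper: count devices via iommu_group_for_each_dev().
 * The callback runs under group->mutex, so it must not block on
 * anything that feeds back into the same group. */
static int example_count_one(struct device *dev, void *data)
{
	(*(int *)data)++;
	return 0;	/* returning non-zero would abort the walk */
}

static int example_count_devices(struct iommu_group *group)
{
	int count = 0;

	iommu_group_for_each_dev(group, &count, example_count_one);
	return count;
}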
|
H A D | fsl_pamu_domain.c | 870 struct iommu_group *group; get_device_iommu_group() local 872 group = iommu_group_get(dev); get_device_iommu_group() 873 if (!group) get_device_iommu_group() 874 group = iommu_group_alloc(); get_device_iommu_group() 876 return group; get_device_iommu_group() 890 /* Get iommu group information from peer devices or devices on the parent bus */ get_shared_pci_device_group() 894 struct iommu_group *group; get_shared_pci_device_group() local 899 * the shared iommu group. get_shared_pci_device_group() 905 group = iommu_group_get(&tmp->dev); get_shared_pci_device_group() 906 if (group) get_shared_pci_device_group() 907 return group; get_shared_pci_device_group() 920 struct iommu_group *group = NULL; get_pci_device_group() local 924 /* We can partition PCIe devices so assign device group to the device */ get_pci_device_group() 926 group = iommu_group_get_for_dev(&pdev->dev); get_pci_device_group() 937 * PCI controllers device group. If this is the first get_pci_device_group() 939 * device group information from the PCI controller device get_pci_device_group() 940 * node and remove the PCI controller iommu group. get_pci_device_group() 941 * For subsequent devices, the iommu group information can get_pci_device_group() 946 group = get_device_iommu_group(pci_ctl->parent); get_pci_device_group() 949 group = get_shared_pci_device_group(pdev); get_pci_device_group() 953 if (!group) get_pci_device_group() 954 group = ERR_PTR(-ENODEV); get_pci_device_group() 956 return group; get_pci_device_group() 961 struct iommu_group *group = ERR_PTR(-ENODEV); fsl_pamu_add_device() local 967 * For platform devices we allocate a separate group for fsl_pamu_add_device() 976 group = get_pci_device_group(pdev); fsl_pamu_add_device() 981 group = get_device_iommu_group(dev); fsl_pamu_add_device() 984 if (IS_ERR(group)) fsl_pamu_add_device() 985 return PTR_ERR(group); fsl_pamu_add_device() 988 * Check if device has already been added to an iommu group. fsl_pamu_add_device() 993 ret = iommu_group_add_device(group, dev); fsl_pamu_add_device() 995 iommu_group_put(group); fsl_pamu_add_device()
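Note: fsl_pamu_add_device() above resolves a group (existing, shared with a peer, or freshly allocated), adds the device, then calls iommu_group_put(), dropping the local reference while the device keeps its own. A condensed sketch of that get-or-allocate-then-add flow, assuming the usual <linux/iommu.h> and <linux/err.h> context (not the driver verbatim):

static int example_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);		/* takes a reference */
	if (!group)
		group = iommu_group_alloc();	/* ERR_PTR on failure */
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);		/* device now holds its own ref */
	return ret;
}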
|
H A D | tegra-smmu.c | 298 const struct tegra_smmu_swgroup *group = NULL; tegra_smmu_find_swgroup() local 303 group = &smmu->soc->swgroups[i]; tegra_smmu_find_swgroup() 308 return group; tegra_smmu_find_swgroup() 314 const struct tegra_smmu_swgroup *group; tegra_smmu_enable() local 329 group = tegra_smmu_find_swgroup(smmu, swgroup); tegra_smmu_enable() 330 if (group) { tegra_smmu_enable() 331 value = smmu_readl(smmu, group->reg); tegra_smmu_enable() 335 smmu_writel(smmu, value, group->reg); tegra_smmu_enable() 342 const struct tegra_smmu_swgroup *group; tegra_smmu_disable() local 346 group = tegra_smmu_find_swgroup(smmu, swgroup); tegra_smmu_disable() 347 if (group) { tegra_smmu_disable() 348 value = smmu_readl(smmu, group->reg); tegra_smmu_disable() 352 smmu_writel(smmu, value, group->reg); tegra_smmu_disable()
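Note: tegra_smmu_enable()/_disable() above look up the swgroup descriptor by ID, then read-modify-write its register. A self-contained toy model of that lookup-plus-RMW; the table contents and the enable bit are invented placeholders (the real driver derives the written value from the ASID).

#include <stddef.h>
#include <stdint.h>

struct swgroup { unsigned int id; unsigned int reg; };

static const struct swgroup swgroups[] = {
	{ .id = 1, .reg = 0 },
	{ .id = 7, .reg = 1 },
};
static uint32_t regs[2];

static const struct swgroup *find_swgroup(unsigned int id)
{
	size_t i;

	for (i = 0; i < sizeof(swgroups) / sizeof(swgroups[0]); i++)
		if (swgroups[i].id == id)
			return &swgroups[i];
	return NULL;		/* unknown swgroup: caller skips it */
}

static void swgroup_set_enable(unsigned int id, int enable)
{
	const struct swgroup *group = find_swgroup(id);
	uint32_t value;

	if (!group)
		return;
	value = regs[group->reg];	/* smmu_readl(smmu, group->reg) */
	if (enable)
		value |= 1u << 31;	/* placeholder enable bit */
	else
		value &= ~(1u << 31);
	regs[group->reg] = value;	/* smmu_writel(smmu, value, ...) */
}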
|
H A D | iommu-sysfs.c | 18 * We provide a common class "devices" group which initially has no attributes. 19 * As devices are added to the IOMMU, we'll add links to the group. 54 * attributes can be provided as an attribute group, allowing a unique
|
/linux-4.1.27/fs/notify/fanotify/ |
H A D | fanotify_user.c | 52 * Called with the group->notification_mutex held. 54 static struct fsnotify_event *get_one_event(struct fsnotify_group *group, get_one_event() argument 57 BUG_ON(!mutex_is_locked(&group->notification_mutex)); get_one_event() 59 pr_debug("%s: group=%p count=%zd\n", __func__, group, count); get_one_event() 61 if (fsnotify_notify_queue_is_empty(group)) get_one_event() 69 return fsnotify_remove_first_event(group); get_one_event() 72 static int create_fd(struct fsnotify_group *group, create_fd() argument 79 pr_debug("%s: group=%p event=%p\n", __func__, group, event); create_fd() 81 client_fd = get_unused_fd_flags(group->fanotify_data.f_flags); create_fd() 93 group->fanotify_data.f_flags | FMODE_NONOTIFY, create_fd() 114 static int fill_event_metadata(struct fsnotify_group *group, fill_event_metadata() argument 122 pr_debug("%s: group=%p metadata=%p event=%p\n", __func__, fill_event_metadata() 123 group, metadata, fsn_event); fill_event_metadata() 136 metadata->fd = create_fd(group, event, file); fill_event_metadata() 146 struct fsnotify_group *group, int fd) dequeue_event() 150 spin_lock(&group->fanotify_data.access_lock); dequeue_event() 151 list_for_each_entry(event, &group->fanotify_data.access_list, dequeue_event() 160 spin_unlock(&group->fanotify_data.access_lock); dequeue_event() 167 static int process_access_response(struct fsnotify_group *group, process_access_response() argument 174 pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group, process_access_response() 192 event = dequeue_event(group, fd); process_access_response() 197 wake_up(&group->fanotify_data.access_waitq); process_access_response() 203 static ssize_t copy_event_to_user(struct fsnotify_group *group, copy_event_to_user() argument 211 pr_debug("%s: group=%p event=%p\n", __func__, group, event); copy_event_to_user() 213 ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f); copy_event_to_user() 243 struct fsnotify_group *group = file->private_data; fanotify_poll() local 246 poll_wait(file, &group->notification_waitq, wait); fanotify_poll() 247 mutex_lock(&group->notification_mutex); fanotify_poll() 248 if (!fsnotify_notify_queue_is_empty(group)) fanotify_poll() 250 mutex_unlock(&group->notification_mutex); fanotify_poll() 258 struct fsnotify_group *group; fanotify_read() local 265 group = file->private_data; fanotify_read() 267 pr_debug("%s: group=%p\n", __func__, group); fanotify_read() 269 add_wait_queue(&group->notification_waitq, &wait); fanotify_read() 271 mutex_lock(&group->notification_mutex); fanotify_read() 272 kevent = get_one_event(group, count); fanotify_read() 273 mutex_unlock(&group->notification_mutex); fanotify_read() 296 ret = copy_event_to_user(group, kevent, buf); fanotify_read() 302 fsnotify_destroy_event(group, kevent); fanotify_read() 309 wake_up(&group->fanotify_data.access_waitq); fanotify_read() 312 spin_lock(&group->fanotify_data.access_lock); fanotify_read() 314 &group->fanotify_data.access_list); fanotify_read() 315 spin_unlock(&group->fanotify_data.access_lock); fanotify_read() 321 remove_wait_queue(&group->notification_waitq, &wait); fanotify_read() 332 struct fsnotify_group *group; fanotify_write() local 335 group = file->private_data; fanotify_write() 340 pr_debug("%s: group=%p count=%zu\n", __func__, group, count); fanotify_write() 345 ret = process_access_response(group, &response); fanotify_write() 357 struct fsnotify_group *group = file->private_data; fanotify_release() local 367 spin_lock(&group->fanotify_data.access_lock); 
fanotify_release() 369 atomic_inc(&group->fanotify_data.bypass_perm); fanotify_release() 371 list_for_each_entry_safe(event, next, &group->fanotify_data.access_list, fanotify_release() 373 pr_debug("%s: found group=%p event=%p\n", __func__, group, fanotify_release() 379 spin_unlock(&group->fanotify_data.access_lock); fanotify_release() 388 wake_up(&group->fanotify_data.access_waitq); fanotify_release() 392 fsnotify_destroy_group(group); fanotify_release() 399 struct fsnotify_group *group; fanotify_ioctl() local 405 group = file->private_data; fanotify_ioctl() 411 mutex_lock(&group->notification_mutex); fanotify_ioctl() 412 list_for_each_entry(fsn_event, &group->notification_list, list) fanotify_ioctl() 414 mutex_unlock(&group->notification_mutex); fanotify_ioctl() 514 static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group, fanotify_remove_vfsmount_mark() argument 522 mutex_lock(&group->mark_mutex); fanotify_remove_vfsmount_mark() 523 fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); fanotify_remove_vfsmount_mark() 525 mutex_unlock(&group->mark_mutex); fanotify_remove_vfsmount_mark() 532 fsnotify_destroy_mark_locked(fsn_mark, group); fanotify_remove_vfsmount_mark() 533 mutex_unlock(&group->mark_mutex); fanotify_remove_vfsmount_mark() 542 static int fanotify_remove_inode_mark(struct fsnotify_group *group, fanotify_remove_inode_mark() argument 550 mutex_lock(&group->mark_mutex); fanotify_remove_inode_mark() 551 fsn_mark = fsnotify_find_inode_mark(group, inode); fanotify_remove_inode_mark() 553 mutex_unlock(&group->mark_mutex); fanotify_remove_inode_mark() 560 fsnotify_destroy_mark_locked(fsn_mark, group); fanotify_remove_inode_mark() 561 mutex_unlock(&group->mark_mutex); fanotify_remove_inode_mark() 600 static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, fanotify_add_new_mark() argument 607 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) fanotify_add_new_mark() 615 ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0); fanotify_add_new_mark() 625 static int fanotify_add_vfsmount_mark(struct fsnotify_group *group, fanotify_add_vfsmount_mark() argument 632 mutex_lock(&group->mark_mutex); fanotify_add_vfsmount_mark() 633 fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); fanotify_add_vfsmount_mark() 635 fsn_mark = fanotify_add_new_mark(group, NULL, mnt); fanotify_add_vfsmount_mark() 637 mutex_unlock(&group->mark_mutex); fanotify_add_vfsmount_mark() 642 mutex_unlock(&group->mark_mutex); fanotify_add_vfsmount_mark() 651 static int fanotify_add_inode_mark(struct fsnotify_group *group, fanotify_add_inode_mark() argument 658 pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); fanotify_add_inode_mark() 670 mutex_lock(&group->mark_mutex); fanotify_add_inode_mark() 671 fsn_mark = fsnotify_find_inode_mark(group, inode); fanotify_add_inode_mark() 673 fsn_mark = fanotify_add_new_mark(group, inode, NULL); fanotify_add_inode_mark() 675 mutex_unlock(&group->mark_mutex); fanotify_add_inode_mark() 680 mutex_unlock(&group->mark_mutex); fanotify_add_inode_mark() 692 struct fsnotify_group *group; SYSCALL_DEFINE2() local 731 group = fsnotify_alloc_group(&fanotify_fsnotify_ops); SYSCALL_DEFINE2() 732 if (IS_ERR(group)) { SYSCALL_DEFINE2() 734 return PTR_ERR(group); SYSCALL_DEFINE2() 737 group->fanotify_data.user = user; SYSCALL_DEFINE2() 745 group->overflow_event = &oevent->fse; SYSCALL_DEFINE2() 749 group->fanotify_data.f_flags = event_f_flags; SYSCALL_DEFINE2() 751 spin_lock_init(&group->fanotify_data.access_lock); SYSCALL_DEFINE2() 
752 init_waitqueue_head(&group->fanotify_data.access_waitq); SYSCALL_DEFINE2() 753 INIT_LIST_HEAD(&group->fanotify_data.access_list); SYSCALL_DEFINE2() 754 atomic_set(&group->fanotify_data.bypass_perm, 0); SYSCALL_DEFINE2() 758 group->priority = FS_PRIO_0; SYSCALL_DEFINE2() 761 group->priority = FS_PRIO_1; SYSCALL_DEFINE2() 764 group->priority = FS_PRIO_2; SYSCALL_DEFINE2() 775 group->max_events = UINT_MAX; SYSCALL_DEFINE2() 777 group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS; SYSCALL_DEFINE2() 784 group->fanotify_data.max_marks = UINT_MAX; SYSCALL_DEFINE2() 786 group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS; SYSCALL_DEFINE2() 789 fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags); SYSCALL_DEFINE2() 796 fsnotify_destroy_group(group); SYSCALL_DEFINE2() 806 struct fsnotify_group *group; SYSCALL_DEFINE5() local 854 group = f.file->private_data; SYSCALL_DEFINE5() 857 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not SYSCALL_DEFINE5() 862 group->priority == FS_PRIO_0) SYSCALL_DEFINE5() 868 fsnotify_clear_vfsmount_marks_by_group(group); SYSCALL_DEFINE5() 870 fsnotify_clear_inode_marks_by_group(group); SYSCALL_DEFINE5() 878 /* inode held in place by reference to path; group by fget on fd */ SYSCALL_DEFINE5() 888 ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags); SYSCALL_DEFINE5() 890 ret = fanotify_add_inode_mark(group, inode, mask, flags); SYSCALL_DEFINE5() 894 ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags); SYSCALL_DEFINE5() 896 ret = fanotify_remove_inode_mark(group, inode, mask, flags); SYSCALL_DEFINE5() 145 dequeue_event( struct fsnotify_group *group, int fd) dequeue_event() argument
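Note: the plumbing above (fanotify_init creating the group, fanotify_read() draining the notification list, create_fd() opening one fd per event) has a documented userspace counterpart. A minimal listener sketch, assuming CAP_SYS_ADMIN and a fanotify-enabled kernel, with error handling abbreviated:

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>
#include <unistd.h>

int main(void)
{
	struct fanotify_event_metadata buf[64], *md;
	ssize_t len;
	int fd;

	fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);
	if (fd < 0) { perror("fanotify_init"); return 1; }

	/* watch every object on the mount containing /tmp */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
			  FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, "/tmp") < 0) {
		perror("fanotify_mark");
		return 1;
	}

	len = read(fd, buf, sizeof(buf));	/* blocks in fanotify_read() */
	for (md = buf; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
		printf("mask=%llx pid=%d fd=%d\n",
		       (unsigned long long)md->mask, (int)md->pid, md->fd);
		if (md->fd >= 0)
			close(md->fd);	/* fd opened by create_fd() above */
	}
	return 0;
}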
|
H A D | fanotify.c | 63 static int fanotify_get_response(struct fsnotify_group *group, fanotify_get_response() argument 68 pr_debug("%s: group=%p event=%p\n", __func__, group, event); fanotify_get_response() 70 wait_event(group->fanotify_data.access_waitq, event->response || fanotify_get_response() 71 atomic_read(&group->fanotify_data.bypass_perm)); fanotify_get_response() 75 * Event was canceled because group is being destroyed. Remove fanotify_get_response() 76 * it from group's event list because we are responsible for fanotify_get_response() 79 fsnotify_remove_event(group, &event->fae.fse); fanotify_get_response() 94 pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__, fanotify_get_response() 95 group, event, ret); fanotify_get_response() 187 static int fanotify_handle_event(struct fsnotify_group *group, fanotify_handle_event() argument 213 pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, fanotify_handle_event() 221 ret = fsnotify_add_event(group, fsn_event, fanotify_merge); fanotify_handle_event() 226 fsnotify_destroy_event(group, fsn_event); fanotify_handle_event() 233 ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event)); fanotify_handle_event() 234 fsnotify_destroy_event(group, fsn_event); fanotify_handle_event() 240 static void fanotify_free_group_priv(struct fsnotify_group *group) fanotify_free_group_priv() argument 244 user = group->fanotify_data.user; fanotify_free_group_priv()
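Note: fanotify_get_response() above parks a permission event on the access list until userspace answers or bypass_perm is raised at group teardown. The answering side, sketched from the UAPI: read the event, write back a struct fanotify_response, which is consumed by process_access_response() in fanotify_user.c above. This assumes the group was created with FAN_CLASS_CONTENT and marked with FAN_OPEN_PERM; error handling is omitted.

#include <sys/fanotify.h>
#include <unistd.h>

/* Grant every queued permission event on this fanotify fd. */
static void grant_all_pending(int fanotify_fd)
{
	struct fanotify_event_metadata buf[16], *md;
	ssize_t len = read(fanotify_fd, buf, sizeof(buf));

	for (md = buf; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
		if (md->mask & FAN_OPEN_PERM) {
			struct fanotify_response resp = {
				.fd = md->fd,
				.response = FAN_ALLOW,	/* or FAN_DENY */
			};
			write(fanotify_fd, &resp, sizeof(resp));
		}
		if (md->fd >= 0)
			close(md->fd);
	}
}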
|
H A D | fanotify.h | 28 * group->notification_list to group->fanotify_data.access_list to wait for
|
/linux-4.1.27/drivers/vfio/ |
H A D | vfio.c | 92 struct vfio_group *group; member in struct:vfio_device 149 static int vfio_alloc_group_minor(struct vfio_group *group) vfio_alloc_group_minor() argument 151 return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL); vfio_alloc_group_minor() 161 static void vfio_group_get(struct vfio_group *group); 166 * it's freed via kref. Must support container/group/device being 187 static void vfio_group_unlock_and_free(struct vfio_group *group) vfio_group_unlock_and_free() argument 192 * that the group is no longer in vfio.group_list. vfio_group_unlock_and_free() 194 iommu_group_unregister_notifier(group->iommu_group, &group->nb); vfio_group_unlock_and_free() 195 kfree(group); vfio_group_unlock_and_free() 203 struct vfio_group *group, *tmp; vfio_create_group() local 207 group = kzalloc(sizeof(*group), GFP_KERNEL); vfio_create_group() 208 if (!group) vfio_create_group() 211 kref_init(&group->kref); vfio_create_group() 212 INIT_LIST_HEAD(&group->device_list); vfio_create_group() 213 mutex_init(&group->device_lock); vfio_create_group() 214 INIT_LIST_HEAD(&group->unbound_list); vfio_create_group() 215 mutex_init(&group->unbound_lock); vfio_create_group() 216 atomic_set(&group->container_users, 0); vfio_create_group() 217 atomic_set(&group->opened, 0); vfio_create_group() 218 group->iommu_group = iommu_group; vfio_create_group() 220 group->nb.notifier_call = vfio_iommu_group_notifier; vfio_create_group() 226 * do anything unless it can find the group in vfio.group_list, so vfio_create_group() 229 ret = iommu_group_register_notifier(iommu_group, &group->nb); vfio_create_group() 231 kfree(group); vfio_create_group() 237 /* Did we race creating this group? */ vfio_create_group() 241 vfio_group_unlock_and_free(group); vfio_create_group() 246 minor = vfio_alloc_group_minor(group); vfio_create_group() 248 vfio_group_unlock_and_free(group); vfio_create_group() 254 group, "%d", iommu_group_id(iommu_group)); vfio_create_group() 257 vfio_group_unlock_and_free(group); vfio_create_group() 261 group->minor = minor; vfio_create_group() 262 group->dev = dev; vfio_create_group() 264 list_add(&group->vfio_next, &vfio.group_list); vfio_create_group() 268 return group; vfio_create_group() 274 struct vfio_group *group = container_of(kref, struct vfio_group, kref); vfio_group_release() local 276 struct iommu_group *iommu_group = group->iommu_group; vfio_group_release() 278 WARN_ON(!list_empty(&group->device_list)); vfio_group_release() 281 &group->unbound_list, unbound_next) { vfio_group_release() 286 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor)); vfio_group_release() 287 list_del(&group->vfio_next); vfio_group_release() 288 vfio_free_group_minor(group->minor); vfio_group_release() 289 vfio_group_unlock_and_free(group); vfio_group_release() 293 static void vfio_group_put(struct vfio_group *group) vfio_group_put() argument 295 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); vfio_group_put() 298 /* Assume group_lock or group reference is held */ vfio_group_get() 299 static void vfio_group_get(struct vfio_group *group) vfio_group_get() argument 301 kref_get(&group->kref); vfio_group_get() 306 * sure the group pointer is valid under lock and get a reference. 
308 static struct vfio_group *vfio_group_try_get(struct vfio_group *group) vfio_group_try_get() argument 310 struct vfio_group *target = group; vfio_group_try_get() 313 list_for_each_entry(group, &vfio.group_list, vfio_next) { vfio_group_try_get() 314 if (group == target) { vfio_group_try_get() 315 vfio_group_get(group); vfio_group_try_get() 317 return group; vfio_group_try_get() 328 struct vfio_group *group; vfio_group_get_from_iommu() local 331 list_for_each_entry(group, &vfio.group_list, vfio_next) { vfio_group_get_from_iommu() 332 if (group->iommu_group == iommu_group) { vfio_group_get_from_iommu() 333 vfio_group_get(group); vfio_group_get_from_iommu() 335 return group; vfio_group_get_from_iommu() 345 struct vfio_group *group; vfio_group_get_from_minor() local 348 group = idr_find(&vfio.group_idr, minor); vfio_group_get_from_minor() 349 if (!group) { vfio_group_get_from_minor() 353 vfio_group_get(group); vfio_group_get_from_minor() 356 return group; vfio_group_get_from_minor() 363 struct vfio_device *vfio_group_create_device(struct vfio_group *group, vfio_group_create_device() argument 376 device->group = group; vfio_group_create_device() 381 /* No need to get group_lock, caller has group reference */ vfio_group_create_device() 382 vfio_group_get(group); vfio_group_create_device() 384 mutex_lock(&group->device_lock); vfio_group_create_device() 385 list_add(&device->group_next, &group->device_list); vfio_group_create_device() 386 mutex_unlock(&group->device_lock); vfio_group_create_device() 395 struct vfio_group *group = device->group; vfio_device_release() local 398 mutex_unlock(&group->device_lock); vfio_device_release() 408 /* Device reference always implies a group reference */ vfio_device_put() 411 struct vfio_group *group = device->group; vfio_device_put() local 412 kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); vfio_device_put() 413 vfio_group_put(group); vfio_device_put() 419 vfio_group_get(device->group); vfio_device_get() 423 static struct vfio_device *vfio_group_get_device(struct vfio_group *group, vfio_group_get_device() argument 428 mutex_lock(&group->device_lock); vfio_group_get_device() 429 list_for_each_entry(device, &group->device_list, group_next) { vfio_group_get_device() 432 mutex_unlock(&group->device_lock); vfio_group_get_device() 436 mutex_unlock(&group->device_lock); vfio_group_get_device() 442 * a device. It's not always practical to leave a device within a group 460 * A vfio group is viable for use by userspace if all devices are in 468 * group. 
The second is to test if the device exists on the group 474 struct vfio_group *group = data; vfio_dev_viable() local 480 mutex_lock(&group->unbound_lock); vfio_dev_viable() 481 list_for_each_entry(unbound, &group->unbound_list, unbound_next) { vfio_dev_viable() 487 mutex_unlock(&group->unbound_lock); vfio_dev_viable() 492 device = vfio_group_get_device(group, dev); vfio_dev_viable() 504 static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) vfio_group_nb_add_dev() argument 509 device = vfio_group_get_device(group, dev); vfio_group_nb_add_dev() 516 if (!atomic_read(&group->container_users)) vfio_group_nb_add_dev() 520 WARN("Device %s added to live group %d!\n", dev_name(dev), vfio_group_nb_add_dev() 521 iommu_group_id(group->iommu_group)); vfio_group_nb_add_dev() 526 static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) vfio_group_nb_verify() argument 528 /* We don't care what happens when the group isn't in use */ vfio_group_nb_verify() 529 if (!atomic_read(&group->container_users)) vfio_group_nb_verify() 532 return vfio_dev_viable(dev, group); vfio_group_nb_verify() 538 struct vfio_group *group = container_of(nb, struct vfio_group, nb); vfio_iommu_group_notifier() local 544 * risk racing a group being removed. Ignore spurious notifies. vfio_iommu_group_notifier() 546 group = vfio_group_try_get(group); vfio_iommu_group_notifier() 547 if (!group) vfio_iommu_group_notifier() 552 vfio_group_nb_add_dev(group, dev); vfio_iommu_group_notifier() 564 pr_debug("%s: Device %s, group %d binding to driver\n", vfio_iommu_group_notifier() 566 iommu_group_id(group->iommu_group)); vfio_iommu_group_notifier() 569 pr_debug("%s: Device %s, group %d bound to driver %s\n", vfio_iommu_group_notifier() 571 iommu_group_id(group->iommu_group), dev->driver->name); vfio_iommu_group_notifier() 572 BUG_ON(vfio_group_nb_verify(group, dev)); vfio_iommu_group_notifier() 575 pr_debug("%s: Device %s, group %d unbinding from driver %s\n", vfio_iommu_group_notifier() 577 iommu_group_id(group->iommu_group), dev->driver->name); vfio_iommu_group_notifier() 580 pr_debug("%s: Device %s, group %d unbound from driver\n", vfio_iommu_group_notifier() 582 iommu_group_id(group->iommu_group)); vfio_iommu_group_notifier() 584 * XXX An unbound device in a live group is ok, but we'd vfio_iommu_group_notifier() 591 mutex_lock(&group->unbound_lock); vfio_iommu_group_notifier() 593 &group->unbound_list, unbound_next) { vfio_iommu_group_notifier() 600 mutex_unlock(&group->unbound_lock); vfio_iommu_group_notifier() 604 vfio_group_put(group); vfio_iommu_group_notifier() 615 struct vfio_group *group; vfio_add_group_dev() local 622 group = vfio_group_get_from_iommu(iommu_group); vfio_add_group_dev() 623 if (!group) { vfio_add_group_dev() 624 group = vfio_create_group(iommu_group); vfio_add_group_dev() 625 if (IS_ERR(group)) { vfio_add_group_dev() 627 return PTR_ERR(group); vfio_add_group_dev() 637 device = vfio_group_get_device(group, dev); vfio_add_group_dev() 639 WARN(1, "Device %s already exists on group %d\n", vfio_add_group_dev() 642 vfio_group_put(group); vfio_add_group_dev() 646 device = vfio_group_create_device(group, dev, ops, device_data); vfio_add_group_dev() 648 vfio_group_put(group); vfio_add_group_dev() 657 vfio_group_put(group); vfio_add_group_dev() 690 /* Given a referenced group, check if it contains the device */ vfio_dev_present() 691 static bool vfio_dev_present(struct vfio_group *group, struct device *dev) vfio_dev_present() argument 695 device = vfio_group_get_device(group, 
dev); vfio_dev_present() 709 struct vfio_group *group = device->group; vfio_del_group_dev() local 717 * The group exists so long as we have a device reference. Get vfio_del_group_dev() 718 * a group reference and use it to scan for the device going away. vfio_del_group_dev() 720 vfio_group_get(group); vfio_del_group_dev() 723 * When the device is removed from the group, the group suddenly vfio_del_group_dev() 725 * completes), but it's not present in the group. This is bad news vfio_del_group_dev() 726 * for any external users that need to re-acquire a group reference vfio_del_group_dev() 734 mutex_lock(&group->unbound_lock); vfio_del_group_dev() 735 list_add(&unbound->unbound_next, &group->unbound_list); vfio_del_group_dev() 736 mutex_unlock(&group->unbound_lock); vfio_del_group_dev() 743 * If the device is still present in the group after the above vfio_del_group_dev() 751 device = vfio_group_get_device(group, dev); vfio_del_group_dev() 762 !vfio_dev_present(group, dev), HZ * 10); vfio_del_group_dev() 765 !vfio_dev_present(group, dev), HZ * 10); vfio_del_group_dev() 777 vfio_group_put(group); vfio_del_group_dev() 835 struct vfio_group *group; __vfio_container_attach_groups() local 838 list_for_each_entry(group, &container->group_list, container_next) { __vfio_container_attach_groups() 839 ret = driver->ops->attach_group(data, group->iommu_group); __vfio_container_attach_groups() 847 list_for_each_entry_continue_reverse(group, &container->group_list, __vfio_container_attach_groups() 849 driver->ops->detach_group(data, group->iommu_group); __vfio_container_attach_groups() 865 * the group can be assigned to specific users. Therefore, only by vfio_ioctl_set_iommu() 866 * adding a group to a container does the user get the privilege of vfio_ioctl_set_iommu() 1072 static void __vfio_group_unset_container(struct vfio_group *group) __vfio_group_unset_container() argument 1074 struct vfio_container *container = group->container; __vfio_group_unset_container() 1082 group->iommu_group); __vfio_group_unset_container() 1084 group->container = NULL; __vfio_group_unset_container() 1085 list_del(&group->container_next); __vfio_group_unset_container() 1087 /* Detaching the last group deprivileges a container, remove iommu */ __vfio_group_unset_container() 1103 * the group, we know that still exists, therefore the only valid 1106 static int vfio_group_unset_container(struct vfio_group *group) vfio_group_unset_container() argument 1108 int users = atomic_cmpxchg(&group->container_users, 1, 0); vfio_group_unset_container() 1115 __vfio_group_unset_container(group); vfio_group_unset_container() 1122 * implicitly removes the group from the container. That is, if the 1123 * group file descriptor is closed, as well as any device file descriptors, 1124 * the group is free. 
1126 static void vfio_group_try_dissolve_container(struct vfio_group *group) vfio_group_try_dissolve_container() argument 1128 if (0 == atomic_dec_if_positive(&group->container_users)) vfio_group_try_dissolve_container() 1129 __vfio_group_unset_container(group); vfio_group_try_dissolve_container() 1132 static int vfio_group_set_container(struct vfio_group *group, int container_fd) vfio_group_set_container() argument 1139 if (atomic_read(&group->container_users)) vfio_group_set_container() 1160 group->iommu_group); vfio_group_set_container() 1165 group->container = container; vfio_group_set_container() 1166 list_add(&group->container_next, &container->group_list); vfio_group_set_container() 1168 /* Get a reference on the container and mark a user within the group */ vfio_group_set_container() 1170 atomic_inc(&group->container_users); vfio_group_set_container() 1178 static bool vfio_group_viable(struct vfio_group *group) vfio_group_viable() argument 1180 return (iommu_group_for_each_dev(group->iommu_group, vfio_group_viable() 1181 group, vfio_dev_viable) == 0); vfio_group_viable() 1186 static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) vfio_group_get_device_fd() argument 1192 if (0 == atomic_read(&group->container_users) || vfio_group_get_device_fd() 1193 !group->container->iommu_driver || !vfio_group_viable(group)) vfio_group_get_device_fd() 1196 mutex_lock(&group->device_lock); vfio_group_get_device_fd() 1197 list_for_each_entry(device, &group->device_list, group_next) { vfio_group_get_device_fd() 1231 atomic_inc(&group->container_users); vfio_group_get_device_fd() 1236 mutex_unlock(&group->device_lock); vfio_group_get_device_fd() 1244 struct vfio_group *group = filep->private_data; vfio_group_fops_unl_ioctl() local 1263 if (vfio_group_viable(group)) vfio_group_fops_unl_ioctl() 1266 if (group->container) vfio_group_fops_unl_ioctl() 1285 ret = vfio_group_set_container(group, fd); vfio_group_fops_unl_ioctl() 1289 ret = vfio_group_unset_container(group); vfio_group_fops_unl_ioctl() 1299 ret = vfio_group_get_device_fd(group, buf); vfio_group_fops_unl_ioctl() 1319 struct vfio_group *group; vfio_group_fops_open() local 1322 group = vfio_group_get_from_minor(iminor(inode)); vfio_group_fops_open() 1323 if (!group) vfio_group_fops_open() 1326 /* Do we need multiple instances of the group open? Seems not. */ vfio_group_fops_open() 1327 opened = atomic_cmpxchg(&group->opened, 0, 1); vfio_group_fops_open() 1329 vfio_group_put(group); vfio_group_fops_open() 1334 if (group->container) { vfio_group_fops_open() 1335 atomic_dec(&group->opened); vfio_group_fops_open() 1336 vfio_group_put(group); vfio_group_fops_open() 1340 filep->private_data = group; vfio_group_fops_open() 1347 struct vfio_group *group = filep->private_data; vfio_group_fops_release() local 1351 vfio_group_try_dissolve_container(group); vfio_group_fops_release() 1353 atomic_dec(&group->opened); vfio_group_fops_release() 1355 vfio_group_put(group); vfio_group_fops_release() 1379 vfio_group_try_dissolve_container(device->group); vfio_device_fops_release() 1457 * - attaching group(s) to it; 1462 * 2. User space passes a group fd to an external user. 1465 * - the group is initialized; 1469 * the VFIO group from disposal before KVM exits. 1475 * vfio_group_put_external_user() to release the VFIO group. 
1480 struct vfio_group *group = filep->private_data; vfio_group_get_external_user() local 1485 if (!atomic_inc_not_zero(&group->container_users)) vfio_group_get_external_user() 1488 if (!group->container->iommu_driver || vfio_group_get_external_user() 1489 !vfio_group_viable(group)) { vfio_group_get_external_user() 1490 atomic_dec(&group->container_users); vfio_group_get_external_user() 1494 vfio_group_get(group); vfio_group_get_external_user() 1496 return group; vfio_group_get_external_user() 1500 void vfio_group_put_external_user(struct vfio_group *group) vfio_group_put_external_user() argument 1502 vfio_group_put(group); vfio_group_put_external_user() 1503 vfio_group_try_dissolve_container(group); vfio_group_put_external_user() 1507 int vfio_external_user_iommu_id(struct vfio_group *group) vfio_external_user_iommu_id() argument 1509 return iommu_group_id(group->iommu_group); vfio_external_user_iommu_id() 1513 long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) vfio_external_check_extension() argument 1515 return vfio_ioctl_check_extension(group->container, arg); vfio_external_check_extension()
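Note: the group fd lifecycle above (vfio_group_fops_open, vfio_group_set_container, vfio_group_get_device_fd) corresponds to the userspace sequence in Documentation/vfio.txt. A condensed sketch of that sequence; the group number "26" and device name "0000:06:0d.0" are placeholders, and the return value of each call should be checked in real code:

#include <fcntl.h>
#include <linux/vfio.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	struct vfio_group_status status = { .argsz = sizeof(status) };
	int container, group, device;

	container = open("/dev/vfio/vfio", O_RDWR);
	group = open("/dev/vfio/26", O_RDWR);	/* vfio_group_fops_open() */

	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
		fprintf(stderr, "group not viable, see vfio_dev_viable()\n");
		return 1;
	}

	/* vfio_group_set_container() + vfio_ioctl_set_iommu() above */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);

	/* vfio_group_get_device_fd() above */
	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
	printf("device fd = %d\n", device);
	return 0;
}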
|
H A D | vfio_spapr_eeh.c | 34 long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, vfio_spapr_iommu_eeh_ioctl() argument 50 pe = eeh_iommu_group_to_pe(group); vfio_spapr_iommu_eeh_ioctl()
|
H A D | vfio_iommu_spapr_tce.c | 40 * The container descriptor supports only a single group per container. 41 * Required by the API as the container is not supplied with the IOMMU group 316 /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", tce_iommu_attach_group() 319 pr_warn("tce_vfio: Only one group per IOMMU container is allowed, existing id=%d, attaching id=%d\n", tce_iommu_attach_group() 324 pr_err("tce_vfio: attaching group #%u to enabled container\n", tce_iommu_attach_group() 347 pr_warn("tce_vfio: detaching group #%u, expected group is #%u\n", tce_iommu_detach_group() 352 pr_warn("tce_vfio: detaching group #%u from enabled container, forcing disable\n", tce_iommu_detach_group() 357 /* pr_debug("tce_vfio: detaching group #%u from iommu %p\n", tce_iommu_detach_group()
|
/linux-4.1.27/fs/notify/inotify/ |
H A D | inotify_user.c | 114 struct fsnotify_group *group = file->private_data; inotify_poll() local 117 poll_wait(file, &group->notification_waitq, wait); inotify_poll() 118 mutex_lock(&group->notification_mutex); inotify_poll() 119 if (!fsnotify_notify_queue_is_empty(group)) inotify_poll() 121 mutex_unlock(&group->notification_mutex); inotify_poll() 141 * Called with the group->notification_mutex held. 143 static struct fsnotify_event *get_one_event(struct fsnotify_group *group, get_one_event() argument 149 if (fsnotify_notify_queue_is_empty(group)) get_one_event() 152 event = fsnotify_peek_first_event(group); get_one_event() 154 pr_debug("%s: group=%p event=%p\n", __func__, group, event); get_one_event() 162 fsnotify_remove_first_event(group); get_one_event() 173 static ssize_t copy_event_to_user(struct fsnotify_group *group, copy_event_to_user() argument 183 pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event); copy_event_to_user() 226 struct fsnotify_group *group; inotify_read() local 233 group = file->private_data; inotify_read() 235 add_wait_queue(&group->notification_waitq, &wait); inotify_read() 237 mutex_lock(&group->notification_mutex); inotify_read() 238 kevent = get_one_event(group, count); inotify_read() 239 mutex_unlock(&group->notification_mutex); inotify_read() 241 pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent); inotify_read() 247 ret = copy_event_to_user(group, kevent, buf); inotify_read() 248 fsnotify_destroy_event(group, kevent); inotify_read() 268 remove_wait_queue(&group->notification_waitq, &wait); inotify_read() 277 struct fsnotify_group *group = file->private_data; inotify_release() local 279 pr_debug("%s: group=%p\n", __func__, group); inotify_release() 281 /* free this group, matching get was inotify_init->fsnotify_obtain_group */ inotify_release() 282 fsnotify_destroy_group(group); inotify_release() 290 struct fsnotify_group *group; inotify_ioctl() local 296 group = file->private_data; inotify_ioctl() 299 pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd); inotify_ioctl() 303 mutex_lock(&group->notification_mutex); inotify_ioctl() 304 list_for_each_entry(fsn_event, &group->notification_list, inotify_ioctl() 309 mutex_unlock(&group->notification_mutex); inotify_ioctl() 366 static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group, inotify_idr_find_locked() argument 369 struct idr *idr = &group->inotify_data.idr; inotify_idr_find_locked() 370 spinlock_t *idr_lock = &group->inotify_data.idr_lock; inotify_idr_find_locked() 387 static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group, inotify_idr_find() argument 391 spinlock_t *idr_lock = &group->inotify_data.idr_lock; inotify_idr_find() 394 i_mark = inotify_idr_find_locked(group, wd); inotify_idr_find() 400 static void do_inotify_remove_from_idr(struct fsnotify_group *group, do_inotify_remove_from_idr() argument 403 struct idr *idr = &group->inotify_data.idr; do_inotify_remove_from_idr() 404 spinlock_t *idr_lock = &group->inotify_data.idr_lock; do_inotify_remove_from_idr() 419 static void inotify_remove_from_idr(struct fsnotify_group *group, inotify_remove_from_idr() argument 422 spinlock_t *idr_lock = &group->inotify_data.idr_lock; inotify_remove_from_idr() 434 WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p" inotify_remove_from_idr() 436 i_mark->fsn_mark.group, i_mark->fsn_mark.inode); inotify_remove_from_idr() 441 found_i_mark = inotify_idr_find_locked(group, wd); inotify_remove_from_idr() 443 WARN_ONCE(1, "%s: i_mark=%p 
i_mark->wd=%d i_mark->group=%p" inotify_remove_from_idr() 445 i_mark->fsn_mark.group, i_mark->fsn_mark.inode); inotify_remove_from_idr() 455 WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p " inotify_remove_from_idr() 457 "found_i_mark->group=%p found_i_mark->inode=%p\n", inotify_remove_from_idr() 458 __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group, inotify_remove_from_idr() 460 found_i_mark->fsn_mark.group, inotify_remove_from_idr() 471 printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p" inotify_remove_from_idr() 473 i_mark->fsn_mark.group, i_mark->fsn_mark.inode); inotify_remove_from_idr() 478 do_inotify_remove_from_idr(group, i_mark); inotify_remove_from_idr() 491 struct fsnotify_group *group) inotify_ignored_and_remove_idr() 496 inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED, inotify_ignored_and_remove_idr() 501 inotify_remove_from_idr(group, i_mark); inotify_ignored_and_remove_idr() 503 atomic_dec(&group->inotify_data.user->inotify_watches); inotify_ignored_and_remove_idr() 516 static int inotify_update_existing_watch(struct fsnotify_group *group, inotify_update_existing_watch() argument 529 fsn_mark = fsnotify_find_inode_mark(group, inode); inotify_update_existing_watch() 567 static int inotify_new_watch(struct fsnotify_group *group, inotify_new_watch() argument 574 struct idr *idr = &group->inotify_data.idr; inotify_new_watch() 575 spinlock_t *idr_lock = &group->inotify_data.idr_lock; inotify_new_watch() 588 if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches) inotify_new_watch() 596 ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, group, inode, inotify_new_watch() 600 inotify_remove_from_idr(group, tmp_i_mark); inotify_new_watch() 605 atomic_inc(&group->inotify_data.user->inotify_watches); inotify_new_watch() 617 static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg) inotify_update_watch() argument 621 mutex_lock(&group->mark_mutex); inotify_update_watch() 623 ret = inotify_update_existing_watch(group, inode, arg); inotify_update_watch() 626 ret = inotify_new_watch(group, inode, arg); inotify_update_watch() 627 mutex_unlock(&group->mark_mutex); inotify_update_watch() 634 struct fsnotify_group *group; inotify_new_group() local 637 group = fsnotify_alloc_group(&inotify_fsnotify_ops); inotify_new_group() 638 if (IS_ERR(group)) inotify_new_group() 639 return group; inotify_new_group() 643 fsnotify_destroy_group(group); inotify_new_group() 646 group->overflow_event = &oevent->fse; inotify_new_group() 647 fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW); inotify_new_group() 652 group->max_events = max_events; inotify_new_group() 654 spin_lock_init(&group->inotify_data.idr_lock); inotify_new_group() 655 idr_init(&group->inotify_data.idr); inotify_new_group() 656 group->inotify_data.user = get_current_user(); inotify_new_group() 658 if (atomic_inc_return(&group->inotify_data.user->inotify_devs) > inotify_new_group() 660 fsnotify_destroy_group(group); inotify_new_group() 664 return group; inotify_new_group() 671 struct fsnotify_group *group; SYSCALL_DEFINE1() local 681 /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ SYSCALL_DEFINE1() 682 group = inotify_new_group(inotify_max_queued_events); SYSCALL_DEFINE1() 683 if (IS_ERR(group)) SYSCALL_DEFINE1() 684 return PTR_ERR(group); SYSCALL_DEFINE1() 686 ret = anon_inode_getfd("inotify", &inotify_fops, group, SYSCALL_DEFINE1() 689 fsnotify_destroy_group(group); 
SYSCALL_DEFINE1() 702 struct fsnotify_group *group; SYSCALL_DEFINE3() local 732 /* inode held in place by reference to path; group by fget on fd */ SYSCALL_DEFINE3() 734 group = f.file->private_data; SYSCALL_DEFINE3() 737 ret = inotify_update_watch(group, inode, mask); SYSCALL_DEFINE3() 746 struct fsnotify_group *group; SYSCALL_DEFINE2() local 760 group = f.file->private_data; SYSCALL_DEFINE2() 763 i_mark = inotify_idr_find(group, wd); SYSCALL_DEFINE2() 769 fsnotify_destroy_mark(&i_mark->fsn_mark, group); SYSCALL_DEFINE2() 490 inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group) inotify_ignored_and_remove_idr() argument
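Note: the three syscalls above (inotify_init1, inotify_add_watch via inotify_update_watch(), inotify_rm_watch) map directly onto the standard userspace API. A minimal watcher sketch, with error handling abbreviated:

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096], *p;
	ssize_t len;
	int fd, wd;

	fd = inotify_init1(IN_CLOEXEC);		/* SYSCALL_DEFINE1 above */
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);

	len = read(fd, buf, sizeof(buf));	/* blocks in inotify_read() */
	for (p = buf; len > 0 && p < buf + len; ) {
		struct inotify_event *ev = (struct inotify_event *)p;

		printf("wd=%d mask=%x name=%s\n", ev->wd, (unsigned)ev->mask,
		       ev->len ? ev->name : "");
		p += sizeof(*ev) + ev->len;
	}
	inotify_rm_watch(fd, wd);		/* SYSCALL_DEFINE2 above */
	return 0;
}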
|
H A D | inotify_fsnotify.c | 65 int inotify_handle_event(struct fsnotify_group *group, inotify_handle_event() argument 93 pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, inotify_handle_event() 111 ret = fsnotify_add_event(group, fsn_event, inotify_merge); inotify_handle_event() 114 fsnotify_destroy_event(group, fsn_event); inotify_handle_event() 118 fsnotify_destroy_mark(inode_mark, group); inotify_handle_event() 123 static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group) inotify_freeing_mark() argument 125 inotify_ignored_and_remove_idr(fsn_mark, group); inotify_freeing_mark() 148 WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in " idr_callback() 158 printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n", idr_callback() 159 fsn_mark->group, fsn_mark->inode, i_mark->wd); idr_callback() 163 static void inotify_free_group_priv(struct fsnotify_group *group) inotify_free_group_priv() argument 166 idr_for_each(&group->inotify_data.idr, idr_callback, group); inotify_free_group_priv() 167 idr_destroy(&group->inotify_data.idr); inotify_free_group_priv() 168 if (group->inotify_data.user) { inotify_free_group_priv() 169 atomic_dec(&group->inotify_data.user->inotify_devs); inotify_free_group_priv() 170 free_uid(group->inotify_data.user); inotify_free_group_priv()
|
H A D | inotify.h | 24 struct fsnotify_group *group); 25 extern int inotify_handle_event(struct fsnotify_group *group,
|
/linux-4.1.27/fs/nilfs2/ |
H A D | alloc.c | 35 * nilfs_palloc_groups_per_desc_block - get the number of groups that a group 76 /* Number of blocks in a group including entry blocks and nilfs_palloc_init_blockgroup() 87 * nilfs_palloc_group - get group number and offset from an entry number 90 * @offset: pointer to store offset number in the group 95 __u64 group = nr; nilfs_palloc_group() local 97 *offset = do_div(group, nilfs_palloc_entries_per_group(inode)); nilfs_palloc_group() 98 return group; nilfs_palloc_group() 102 * nilfs_palloc_desc_blkoff - get block offset of a group descriptor block 104 * @group: group number 107 * block which contains a descriptor of the specified group. 110 nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group) nilfs_palloc_desc_blkoff() argument 113 group / nilfs_palloc_groups_per_desc_block(inode); nilfs_palloc_desc_blkoff() 120 * @group: group number 123 * block used to allocate/deallocate entries in the specified group. 126 nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group) nilfs_palloc_bitmap_blkoff() argument 129 group % nilfs_palloc_groups_per_desc_block(inode); nilfs_palloc_bitmap_blkoff() 130 return nilfs_palloc_desc_blkoff(inode, group) + 1 + nilfs_palloc_bitmap_blkoff() 135 * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group 137 * @group: group number 138 * @desc: pointer to descriptor structure for the group 141 nilfs_palloc_group_desc_nfrees(struct inode *inode, unsigned long group, nilfs_palloc_group_desc_nfrees() argument 146 spin_lock(nilfs_mdt_bgl_lock(inode, group)); nilfs_palloc_group_desc_nfrees() 148 spin_unlock(nilfs_mdt_bgl_lock(inode, group)); nilfs_palloc_group_desc_nfrees() 155 * @group: group number 156 * @desc: pointer to descriptor structure for the group 161 unsigned long group, nilfs_palloc_group_desc_add_entries() 165 spin_lock(nilfs_mdt_bgl_lock(inode, group)); nilfs_palloc_group_desc_add_entries() 167 spin_unlock(nilfs_mdt_bgl_lock(inode, group)); nilfs_palloc_group_desc_add_entries() 178 unsigned long group, group_offset; nilfs_palloc_entry_blkoff() local 180 group = nilfs_palloc_group(inode, nr, &group_offset); nilfs_palloc_entry_blkoff() 182 return nilfs_palloc_bitmap_blkoff(inode, group) + 1 + nilfs_palloc_entry_blkoff() 187 * nilfs_palloc_desc_block_init - initialize buffer of a group descriptor block 243 * nilfs_palloc_get_desc_block - get buffer head of a group descriptor block 245 * @group: group number 250 unsigned long group, nilfs_palloc_get_desc_block() 256 nilfs_palloc_desc_blkoff(inode, group), nilfs_palloc_get_desc_block() 264 * @group: group number 269 unsigned long group, nilfs_palloc_get_bitmap_block() 275 nilfs_palloc_bitmap_blkoff(inode, group), nilfs_palloc_get_bitmap_block() 299 * nilfs_palloc_block_get_group_desc - get kernel address of a group descriptor 301 * @group: group number 302 * @bh: buffer head of the buffer storing the group descriptor block 307 unsigned long group, nilfs_palloc_block_get_group_desc() 311 group % nilfs_palloc_groups_per_desc_block(inode); nilfs_palloc_block_get_group_desc() 334 * nilfs_palloc_find_available_slot - find available slot in a group 336 * @group: group number 337 * @target: offset number of an entry in the group (start point) 338 * @bitmap: bitmap of the group 342 unsigned long group, nilfs_palloc_find_available_slot() 356 nilfs_mdt_bgl_lock(inode, group), pos, bitmap)) nilfs_palloc_find_available_slot() 375 nilfs_mdt_bgl_lock(inode, group), pos, nilfs_palloc_find_available_slot() 385 * in a group descriptor block 387 * 
@curr: current group number 476 unsigned long group, maxgroup, ngroups; nilfs_palloc_prepare_alloc_entry() local 484 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); nilfs_palloc_prepare_alloc_entry() 489 if (group >= ngroups) { nilfs_palloc_prepare_alloc_entry() 491 group = 0; nilfs_palloc_prepare_alloc_entry() 495 ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh); nilfs_palloc_prepare_alloc_entry() 500 inode, group, desc_bh, desc_kaddr); nilfs_palloc_prepare_alloc_entry() 501 n = nilfs_palloc_rest_groups_in_desc_block(inode, group, nilfs_palloc_prepare_alloc_entry() 503 for (j = 0; j < n; j++, desc++, group++) { nilfs_palloc_prepare_alloc_entry() 504 if (nilfs_palloc_group_desc_nfrees(inode, group, desc) nilfs_palloc_prepare_alloc_entry() 507 inode, group, 1, &bitmap_bh); nilfs_palloc_prepare_alloc_entry() 513 inode, group, group_offset, bitmap, nilfs_palloc_prepare_alloc_entry() 518 inode, group, desc, -1); nilfs_palloc_prepare_alloc_entry() 520 entries_per_group * group + pos; nilfs_palloc_prepare_alloc_entry() 573 unsigned long group, group_offset; nilfs_palloc_commit_free_entry() local 577 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); nilfs_palloc_commit_free_entry() 579 desc = nilfs_palloc_block_get_group_desc(inode, group, nilfs_palloc_commit_free_entry() 584 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), nilfs_palloc_commit_free_entry() 589 nilfs_palloc_group_desc_add_entries(inode, group, desc, 1); nilfs_palloc_commit_free_entry() 613 unsigned long group, group_offset; nilfs_palloc_abort_alloc_entry() local 615 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); nilfs_palloc_abort_alloc_entry() 617 desc = nilfs_palloc_block_get_group_desc(inode, group, nilfs_palloc_abort_alloc_entry() 621 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), nilfs_palloc_abort_alloc_entry() 626 nilfs_palloc_group_desc_add_entries(inode, group, desc, 1); nilfs_palloc_abort_alloc_entry() 648 unsigned long group, group_offset; nilfs_palloc_prepare_free_entry() local 651 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); nilfs_palloc_prepare_free_entry() 652 ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh); nilfs_palloc_prepare_free_entry() 655 ret = nilfs_palloc_get_bitmap_block(inode, group, 1, &bitmap_bh); nilfs_palloc_prepare_free_entry() 683 * nilfs_palloc_group_is_in - judge if an entry is in a group 685 * @group: group number 689 nilfs_palloc_group_is_in(struct inode *inode, unsigned long group, __u64 nr) nilfs_palloc_group_is_in() argument 693 first = group * nilfs_palloc_entries_per_group(inode); nilfs_palloc_group_is_in() 710 unsigned long group, group_offset; nilfs_palloc_freev() local 714 group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset); nilfs_palloc_freev() 715 ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh); nilfs_palloc_freev() 718 ret = nilfs_palloc_get_bitmap_block(inode, group, 0, nilfs_palloc_freev() 726 inode, group, desc_bh, desc_kaddr); nilfs_palloc_freev() 730 (j < nitems) && nilfs_palloc_group_is_in(inode, group, nilfs_palloc_freev() 735 nilfs_mdt_bgl_lock(inode, group), nilfs_palloc_freev() 745 nilfs_palloc_group_desc_add_entries(inode, group, desc, n); nilfs_palloc_freev() 160 nilfs_palloc_group_desc_add_entries(struct inode *inode, unsigned long group, struct nilfs_palloc_group_desc *desc, u32 n) nilfs_palloc_group_desc_add_entries() argument 249 nilfs_palloc_get_desc_block(struct inode *inode, unsigned long group, int create, 
struct buffer_head **bhp) nilfs_palloc_get_desc_block() argument 268 nilfs_palloc_get_bitmap_block(struct inode *inode, unsigned long group, int create, struct buffer_head **bhp) nilfs_palloc_get_bitmap_block() argument 306 nilfs_palloc_block_get_group_desc(const struct inode *inode, unsigned long group, const struct buffer_head *bh, void *kaddr) nilfs_palloc_block_get_group_desc() argument 341 nilfs_palloc_find_available_slot(struct inode *inode, unsigned long group, unsigned long target, unsigned char *bitmap, int bsize) nilfs_palloc_find_available_slot() argument
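Read together, the helpers above are one piece of layout arithmetic: an entry number splits into a (group, in-group offset) pair, and each group's bitmap block sits a fixed distance past the descriptor block that covers it. A minimal userspace sketch of the same math, with hypothetical sizing constants standing in for the values nilfs derives from the on-disk geometry:

#include <stdio.h>

/* Hypothetical sizing constants; nilfs computes these from the
 * filesystem geometry rather than hard-coding them. */
#define ENTRIES_PER_GROUP      4096UL
#define GROUPS_PER_DESC_BLOCK  128UL
#define BLOCKS_PER_GROUP       3UL   /* bitmap block + entry blocks, say */
#define BLOCKS_PER_DESC_SPAN   (1UL + GROUPS_PER_DESC_BLOCK * BLOCKS_PER_GROUP)

/* Split an entry number into group number and in-group offset,
 * as nilfs_palloc_group() does with do_div(). */
unsigned long palloc_group(unsigned long nr, unsigned long *offset)
{
    *offset = nr % ENTRIES_PER_GROUP;
    return nr / ENTRIES_PER_GROUP;
}

/* Block offset of the descriptor block covering @group
 * (cf. nilfs_palloc_desc_blkoff()). */
unsigned long desc_blkoff(unsigned long group)
{
    return (group / GROUPS_PER_DESC_BLOCK) * BLOCKS_PER_DESC_SPAN;
}

/* The group's bitmap block follows its descriptor block
 * (cf. nilfs_palloc_bitmap_blkoff()). */
unsigned long bitmap_blkoff(unsigned long group)
{
    return desc_blkoff(group) + 1 +
           (group % GROUPS_PER_DESC_BLOCK) * BLOCKS_PER_GROUP;
}

int main(void)
{
    unsigned long offset;
    unsigned long group = palloc_group(1000000UL, &offset);

    printf("entry 1000000 -> group %lu, offset %lu, bitmap block %lu\n",
           group, offset, bitmap_blkoff(group));
    return 0;
}

With these made-up constants, entry 1000000 lands in group 244 at offset 576, and that group's bitmap is block 734.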
|
H A D | alloc.h | 33 * nilfs_palloc_entries_per_group - get the number of entries per group 36 * The number of entries per group is defined by the number of bits 56 * @pr_desc_bh: buffer head of the buffer containing block group descriptors 57 * @pr_bitmap_bh: buffer head of the buffer containing a block group bitmap
|
/linux-4.1.27/drivers/gpio/ |
H A D | gpio-lpc32xx.c | 174 static void __set_gpio_dir_p012(struct lpc32xx_gpio_chip *group, __set_gpio_dir_p012() argument 179 group->gpio_grp->dir_clr); __set_gpio_dir_p012() 182 group->gpio_grp->dir_set); __set_gpio_dir_p012() 185 static void __set_gpio_dir_p3(struct lpc32xx_gpio_chip *group, __set_gpio_dir_p3() argument 191 __raw_writel(u, group->gpio_grp->dir_clr); __set_gpio_dir_p3() 193 __raw_writel(u, group->gpio_grp->dir_set); __set_gpio_dir_p3() 196 static void __set_gpio_level_p012(struct lpc32xx_gpio_chip *group, __set_gpio_level_p012() argument 201 group->gpio_grp->outp_set); __set_gpio_level_p012() 204 group->gpio_grp->outp_clr); __set_gpio_level_p012() 207 static void __set_gpio_level_p3(struct lpc32xx_gpio_chip *group, __set_gpio_level_p3() argument 213 __raw_writel(u, group->gpio_grp->outp_set); __set_gpio_level_p3() 215 __raw_writel(u, group->gpio_grp->outp_clr); __set_gpio_level_p3() 218 static void __set_gpo_level_p3(struct lpc32xx_gpio_chip *group, __set_gpo_level_p3() argument 222 __raw_writel(GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_set); __set_gpo_level_p3() 224 __raw_writel(GPO3_PIN_TO_BIT(pin), group->gpio_grp->outp_clr); __set_gpo_level_p3() 227 static int __get_gpio_state_p012(struct lpc32xx_gpio_chip *group, __get_gpio_state_p012() argument 230 return GPIO012_PIN_IN_SEL(__raw_readl(group->gpio_grp->inp_state), __get_gpio_state_p012() 234 static int __get_gpio_state_p3(struct lpc32xx_gpio_chip *group, __get_gpio_state_p3() argument 237 int state = __raw_readl(group->gpio_grp->inp_state); __get_gpio_state_p3() 246 static int __get_gpi_state_p3(struct lpc32xx_gpio_chip *group, __get_gpi_state_p3() argument 249 return GPI3_PIN_IN_SEL(__raw_readl(group->gpio_grp->inp_state), pin); __get_gpi_state_p3() 252 static int __get_gpo_state_p3(struct lpc32xx_gpio_chip *group, __get_gpo_state_p3() argument 255 return GPO3_PIN_IN_SEL(__raw_readl(group->gpio_grp->outp_state), pin); __get_gpo_state_p3() 264 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpio_dir_input_p012() local 266 __set_gpio_dir_p012(group, pin, 1); lpc32xx_gpio_dir_input_p012() 274 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpio_dir_input_p3() local 276 __set_gpio_dir_p3(group, pin, 1); lpc32xx_gpio_dir_input_p3() 289 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpio_get_value_p012() local 291 return __get_gpio_state_p012(group, pin); lpc32xx_gpio_get_value_p012() 296 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpio_get_value_p3() local 298 return __get_gpio_state_p3(group, pin); lpc32xx_gpio_get_value_p3() 303 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpi_get_value() local 305 return __get_gpi_state_p3(group, pin); lpc32xx_gpi_get_value() 311 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpio_dir_output_p012() local 313 __set_gpio_level_p012(group, pin, value); lpc32xx_gpio_dir_output_p012() 314 __set_gpio_dir_p012(group, pin, 0); lpc32xx_gpio_dir_output_p012() 322 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpio_dir_output_p3() local 324 __set_gpio_level_p3(group, pin, value); lpc32xx_gpio_dir_output_p3() 325 __set_gpio_dir_p3(group, pin, 0); lpc32xx_gpio_dir_output_p3() 333 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpio_dir_out_always() local 335 __set_gpo_level_p3(group, pin, value); lpc32xx_gpio_dir_out_always() 342 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpio_set_value_p012() local 344 
__set_gpio_level_p012(group, pin, value); lpc32xx_gpio_set_value_p012() 350 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpio_set_value_p3() local 352 __set_gpio_level_p3(group, pin, value); lpc32xx_gpio_set_value_p3() 358 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpo_set_value() local 360 __set_gpo_level_p3(group, pin, value); lpc32xx_gpo_set_value() 365 struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); lpc32xx_gpo_get_value() local 367 return __get_gpo_state_p3(group, pin); lpc32xx_gpo_get_value()
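All of these accessors lean on the same hardware idiom: paired write-only _set/_clr registers, so changing one pin's direction or level is a single store, with no read-modify-write and no locking against other pins in the bank. A sketch of the idiom over a hypothetical register block (the direction sense, set bit = output, is an assumption):

#include <stdint.h>

/* Hypothetical register block with _set/_clr pairs like gpio_grp above. */
struct gpio_bank {
    volatile uint32_t dir_set;   /* write 1s: pins become outputs */
    volatile uint32_t dir_clr;   /* write 1s: pins become inputs  */
    volatile uint32_t outp_set;  /* write 1s: drive pins high     */
    volatile uint32_t outp_clr;  /* write 1s: drive pins low      */
};

void gpio_set_dir(struct gpio_bank *bank, unsigned int pin, int input)
{
    uint32_t mask = 1u << pin;

    if (input)
        bank->dir_clr = mask;   /* single store, other pins untouched */
    else
        bank->dir_set = mask;
}

void gpio_set_level(struct gpio_bank *bank, unsigned int pin, int value)
{
    uint32_t mask = 1u << pin;

    if (value)
        bank->outp_set = mask;
    else
        bank->outp_clr = mask;
}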
|
/linux-4.1.27/security/tomoyo/ |
H A D | group.c | 2 * security/tomoyo/group.c 67 * @type: Type of this group. 73 struct tomoyo_group *group = tomoyo_get_group(param, type); tomoyo_write_group() local 75 if (!group) tomoyo_write_group() 77 param->list = &group->member_list; tomoyo_write_group() 109 tomoyo_put_group(group); tomoyo_write_group() 114 * tomoyo_path_matches_group - Check whether the given pathname matches members of the given pathname group. 117 * @group: Pointer to "struct tomoyo_path_group". 119 * Returns matched member's pathname if @pathname matches pathnames in @group, 126 const struct tomoyo_group *group) tomoyo_path_matches_group() 129 list_for_each_entry_rcu(member, &group->member_list, head.list) { tomoyo_path_matches_group() 140 * tomoyo_number_matches_group - Check whether the given number matches members of the given number group. 144 * @group: Pointer to "struct tomoyo_number_group". 146 * Returns true if @min and @max partially overlap @group, false otherwise. 152 const struct tomoyo_group *group) tomoyo_number_matches_group() 156 list_for_each_entry_rcu(member, &group->member_list, head.list) { tomoyo_number_matches_group() 169 * tomoyo_address_matches_group - Check whether the given address matches members of the given address group. 173 * @group: Pointer to "struct tomoyo_address_group". 175 * Returns true if @address matches addresses in @group, false otherwise. 180 const struct tomoyo_group *group) tomoyo_address_matches_group() 186 list_for_each_entry_rcu(member, &group->member_list, head.list) { tomoyo_address_matches_group() 125 tomoyo_path_matches_group(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group) tomoyo_path_matches_group() argument 150 tomoyo_number_matches_group(const unsigned long min, const unsigned long max, const struct tomoyo_group *group) tomoyo_number_matches_group() argument 179 tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address, const struct tomoyo_group *group) tomoyo_address_matches_group() argument
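tomoyo_number_matches_group() reduces to a textbook interval-overlap test: a member range [lo, hi] matches the request [min, max] exactly when min <= hi and max >= lo. A standalone sketch of that predicate over a plain array (assumed member layout; the kernel walks an RCU-protected list instead):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct number_member {        /* assumed stand-in for the list members */
    unsigned long min, max;
};

/* True if [min, max] intersects any member range. */
bool number_matches_group(unsigned long min, unsigned long max,
                          const struct number_member *m, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        if (min <= m[i].max && max >= m[i].min)
            return true;
    }
    return false;
}

int main(void)
{
    struct number_member ports[] = { { 80, 80 }, { 8000, 8080 } };

    printf("%d\n", number_matches_group(8070, 9000, ports, 2)); /* 1 */
    return 0;
}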
|
H A D | Makefile | 1 obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
|
H A D | memory.c | 96 struct tomoyo_group *group = NULL; tomoyo_get_group() local 108 list_for_each_entry(group, list, head.list) { list_for_each_entry() 109 if (e.group_name != group->group_name || list_for_each_entry() 110 atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS) list_for_each_entry() 112 atomic_inc(&group->head.users); list_for_each_entry() 122 group = entry; 129 return found ? group : NULL;
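tomoyo_get_group() is a lookup-or-insert with reference counting: scan for a live entry whose (interned, hence pointer-comparable) name matches, bump its count, otherwise link in the freshly allocated one. A simplified single-lock sketch of the same shape, with hypothetical types (the kernel version also skips entries whose GC is in progress, as the head.users check above shows):

#include <pthread.h>
#include <stdlib.h>

struct group {
    const char *name;        /* interned: pointer equality suffices */
    int users;               /* reference count */
    struct group *next;
};

static struct group *groups;
static pthread_mutex_t groups_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find a group by interned name, taking a reference; create it if absent. */
struct group *get_group(const char *name)
{
    struct group *g;

    pthread_mutex_lock(&groups_lock);
    for (g = groups; g; g = g->next) {
        if (g->name == name) {   /* pointer compare: names are interned */
            g->users++;
            goto out;
        }
    }
    g = calloc(1, sizeof(*g));
    if (g) {
        g->name = name;
        g->users = 1;
        g->next = groups;
        groups = g;
    }
out:
    pthread_mutex_unlock(&groups_lock);
    return g;
}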
|
H A D | gc.c | 49 if (head->r.domain == element || head->r.group == element || tomoyo_struct_used_by_io_buffer() 212 tomoyo_put_group(entry->address.group); tomoyo_del_acl() 335 struct tomoyo_group *group = tomoyo_del_group() local 336 container_of(element, typeof(*group), head.list); tomoyo_del_group() 337 tomoyo_put_name(group->group_name); tomoyo_del_group() 390 * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl} tomoyo_try_to_gc() 547 struct tomoyo_group *group; tomoyo_collect_entry() local 560 list_for_each_entry_safe(group, tmp, list, head.list) { list_for_each_entry_safe() 561 tomoyo_collect_member(id, &group->member_list); list_for_each_entry_safe() 562 if (!list_empty(&group->member_list) || list_for_each_entry_safe() 563 atomic_read(&group->head.users) > 0) list_for_each_entry_safe() 565 atomic_set(&group->head.users, list_for_each_entry_safe() 568 &group->head.list); list_for_each_entry_safe()
|
/linux-4.1.27/drivers/isdn/hardware/eicon/ |
H A D | capidtmf.c | 349 0xda97L * 2, /* 697 Hz (Low group 697 Hz) */ 350 0xd299L * 2, /* 770 Hz (Low group 770 Hz) */ 351 0xc8cbL * 2, /* 852 Hz (Low group 852 Hz) */ 352 0xbd36L * 2, /* 941 Hz (Low group 941 Hz) */ 353 0x9501L * 2, /* 1209 Hz (High group 1209 Hz) */ 354 0x7f89L * 2, /* 1336 Hz (High group 1336 Hz) */ 355 0x6639L * 2, /* 1477 Hz (High group 1477 Hz) */ 356 0x48c6L * 2, /* 1633 Hz (High group 1633 Hz) */ 357 0xe14cL * 2, /* 630 Hz (Lower guard of low group 631 Hz) */ 358 0xb2e0L * 2, /* 1015 Hz (Upper guard of low group 1039 Hz) */ 359 0xa1a0L * 2, /* 1130 Hz (Lower guard of high group 1140 Hz) */ 363 0x37d8L * 2, /* 1720 Hz (2nd harmonics of 852 Hz and upper guard of high group: 1715 Hz) */ 370 14, /* Low group peak versus 697 Hz */ 371 14, /* Low group peak versus 770 Hz */ 372 16, /* Low group peak versus 852 Hz */ 373 16, /* Low group peak versus 941 Hz */ 374 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1209 Hz */ 375 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1336 Hz */ 376 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1477 Hz */ 377 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1633 Hz */ 378 14, /* Low group peak versus 635 Hz */ 379 16, /* Low group peak versus 1010 Hz */ 380 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1140 Hz */ 381 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1272 Hz */ 382 DSPDTMF_RX_HARMONICS_SEL_DEFAULT - 8, /* Low group peak versus 1405 Hz */ 383 DSPDTMF_RX_HARMONICS_SEL_DEFAULT - 4, /* Low group peak versus 1555 Hz */ 384 DSPDTMF_RX_HARMONICS_SEL_DEFAULT - 4, /* Low group peak versus 1715 Hz */ 385 12 /* Low group peak versus 100-630 Hz */ 391 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 697 Hz */ 392 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 770 Hz */ 393 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 852 Hz */ 394 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 941 Hz */ 395 20, /* High group peak versus 1209 Hz */ 396 20, /* High group peak versus 1336 Hz */ 397 20, /* High group peak versus 1477 Hz */ 398 20, /* High group peak versus 1633 Hz */ 399 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 635 Hz */ 400 CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 1010 Hz */ 401 16, /* High group peak versus 1140 Hz */ 402 4, /* High group peak versus 1272 Hz */ 403 6, /* High group peak versus 1405 Hz */ 404 8, /* High group peak versus 1555 Hz */ 405 16, /* High group peak versus 1715 Hz */ 406 12 /* High group peak versus 100-630 Hz */
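These look like Goertzel filter coefficients: each table entry is 2*cos(2*pi*f/fs) in fixed point, one per DTMF row/column tone plus the guard frequencies. A sketch that regenerates the eight tone coefficients under the assumption fs = 8000 Hz (the canonical telephony rate); it lands within a few counts of the table above, so the driver's exact scaling or rounding evidently differs slightly:

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* Standard DTMF low-group (row) and high-group (column) tones. */
    static const double freq[8] = {
        697, 770, 852, 941, 1209, 1336, 1477, 1633
    };
    const double fs = 8000.0;   /* assumed: canonical telephony rate */
    const double pi = 3.14159265358979323846;

    for (int i = 0; i < 8; i++) {
        double coeff = 2.0 * cos(2.0 * pi * freq[i] / fs);

        /* The table entries read as 0xXXXXL * 2, i.e. coeff * 65536,
         * so 0xXXXX should be roughly coeff * 32768. */
        printf("%4.0f Hz: 0x%04lxL * 2\n", freq[i],
               (unsigned long)lround(coeff * 32768.0));
    }
    return 0;
}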
|
H A D | mi_pc.h | 178 /* DRAM group coding (for CPU) */ 179 #define LO_RAS10_GREG 0x0008 /*Ras1..0 group low decode address*/ 180 #define HI_RAS10_GREG 0x0010 /*Ras1..0 group high decode address*/ 181 #define LO_RAS32_GREG 0x0018 /*Ras3..2 group low decode address */ 182 #define HI_RAS32_GREG 0x0020 /*Ras3..2 group high decode address */ 183 /* I/O CS group coding for (CPU) */ 184 #define LO_CS20_GREG 0x0028 /* CS2..0 group low decode register */ 185 #define HI_CS20_GREG 0x0030 /* CS2..0 group high decode register */ 186 #define LO_CS3B_GREG 0x0038 /* CS3 & PROM group low decode register */ 187 #define HI_CS3B_GREG 0x0040 /* CS3 & PROM group high decode register */ 190 #define RAS10_BANKSIZE 0x0c08 /* RAS 1..0 group PCI bank size */ 191 #define RAS32_BANKSIZE 0x0c0c /* RAS 3..2 group PCI bank size */ 192 #define CS20_BANKSIZE 0x0c10 /* CS 2..0 group PCI bank size */ 193 #define CS3B_BANKSIZE 0x0c14 /* CS 3 & Boot group PCI bank size */
|
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/ |
H A D | kernel_user_comm.c | 94 * i.e. registering for a group on 1 fs will get messages for that 95 * group from any fs */ 96 /** A single group registration has a uid and a file pointer */ 107 /** Add a receiver to a broadcast group 110 * @param group group number 112 int libcfs_kkuc_group_add(struct file *filp, int uid, int group, __u32 data) libcfs_kkuc_group_add() argument 116 if (group > KUC_GRP_MAX) { libcfs_kkuc_group_add() 117 CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); libcfs_kkuc_group_add() 135 if (kkuc_groups[group].next == NULL) libcfs_kkuc_group_add() 136 INIT_LIST_HEAD(&kkuc_groups[group]); libcfs_kkuc_group_add() 137 list_add(®->kr_chain, &kkuc_groups[group]); libcfs_kkuc_group_add() 140 CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group); libcfs_kkuc_group_add() 146 int libcfs_kkuc_group_rem(int uid, int group) libcfs_kkuc_group_rem() argument 150 if (kkuc_groups[group].next == NULL) libcfs_kkuc_group_rem() 161 libcfs_kkuc_group_put(group, &lh); libcfs_kkuc_group_rem() 165 list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) { libcfs_kkuc_group_rem() 168 CDEBUG(D_KUC, "Removed uid=%d fp=%p from group %d\n", libcfs_kkuc_group_rem() 169 reg->kr_uid, reg->kr_fp, group); libcfs_kkuc_group_rem() 181 int libcfs_kkuc_group_put(int group, void *payload) libcfs_kkuc_group_put() argument 188 list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { libcfs_kkuc_group_put() 211 * Calls a callback function for each link of the given kuc group. 212 * @param group the group to call the function on. 216 int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func, libcfs_kkuc_group_foreach() argument 222 if (group > KUC_GRP_MAX) { libcfs_kkuc_group_foreach() 223 CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group); libcfs_kkuc_group_foreach() 227 /* no link for this group */ libcfs_kkuc_group_foreach() 228 if (kkuc_groups[group].next == NULL) libcfs_kkuc_group_foreach() 232 list_for_each_entry(reg, &kkuc_groups[group], kr_chain) { libcfs_kkuc_group_foreach()
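The kernelcomm registry is an array of linked lists indexed by group number: registration pushes a (uid, handle) record onto the group's list, and a broadcast or foreach walks it. A userspace sketch of that structure with hypothetical names and types (the kernel version guards the lists and writes messages to the registered struct file pointers):

#include <stdlib.h>

#define GRP_MAX 8   /* assumed, standing in for KUC_GRP_MAX */

struct receiver {
    int uid;
    void *handle;                /* stands in for the struct file * */
    struct receiver *next;
};

static struct receiver *grp_list[GRP_MAX + 1];

/* Register a receiver on a broadcast group; -1 on bad group or OOM. */
int group_add(int group, int uid, void *handle)
{
    struct receiver *r;

    if (group < 0 || group > GRP_MAX)
        return -1;
    r = malloc(sizeof(*r));
    if (!r)
        return -1;
    r->uid = uid;
    r->handle = handle;
    r->next = grp_list[group];
    grp_list[group] = r;
    return 0;
}

/* Invoke cb for every receiver registered on @group, stopping on error. */
int group_foreach(int group, int (*cb)(void *handle, void *arg), void *arg)
{
    int rc = 0;

    if (group < 0 || group > GRP_MAX)
        return -1;
    for (struct receiver *r = grp_list[group]; r && rc == 0; r = r->next)
        rc = cb(r->handle, arg);
    return rc;
}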
|
/linux-4.1.27/arch/mn10300/proc-mn103e010/include/proc/ |
H A D | intctl-regs.h | 8 /* intr acceptance group reg */ 11 /* group number register */
|
/linux-4.1.27/arch/mn10300/proc-mn2ws0050/include/proc/ |
H A D | intctl-regs.h | 8 /* intr acceptance group reg */ 11 /* group number register */
|
/linux-4.1.27/arch/avr32/include/asm/ |
H A D | irq.h | 19 * Returns a bitmask of pending interrupts in a group. 21 extern unsigned long intc_get_pending(unsigned int group);
|
/linux-4.1.27/fs/ext4/ |
H A D | resize.c | 61 ext4_group_t group) { ext4_meta_bg_first_group() 62 return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) << ext4_meta_bg_first_group() 67 ext4_group_t group) { ext4_meta_bg_first_block_no() 68 group = ext4_meta_bg_first_group(sb, group); ext4_meta_bg_first_block_no() 69 return ext4_group_first_block_no(sb, group); ext4_meta_bg_first_block_no() 73 ext4_group_t group) { ext4_group_overhead_blocks() 75 overhead = ext4_bg_num_gdb(sb, group); ext4_group_overhead_blocks() 76 if (ext4_bg_has_super(sb, group)) ext4_group_overhead_blocks() 92 ext4_group_t group = input->group; verify_group_input() local 100 if (group != sbi->s_groups_count) { verify_group_input() 101 ext4_warning(sb, "Cannot add at group %u (only %u groups)", verify_group_input() 102 input->group, sbi->s_groups_count); verify_group_input() 106 overhead = ext4_group_overhead_blocks(sb, group); verify_group_input() 112 printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks " verify_group_input() 114 ext4_bg_has_super(sb, input->group) ? "normal" : verify_group_input() 115 "no-super", input->group, input->blocks_count, verify_group_input() 120 ext4_warning(sb, "Last group not full"); verify_group_input() 131 ext4_warning(sb, "Block bitmap not in group (block %llu)", verify_group_input() 134 ext4_warning(sb, "Inode bitmap not in group (block %llu)", verify_group_input() 138 ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)", verify_group_input() 176 * group each time. 180 in the flex group */ 181 __u16 *bg_flags; /* block group flags of groups 233 * and inode tables for a flex group. 236 * group tables from the 1st group of groups contained by @flexgd, which may 237 * be a partial of a flex group. 242 * block group. 255 ext4_group_t group; ext4_alloc_group_tables() local 262 src_group = group_data[0].group; ext4_alloc_group_tables() 268 group = group_data[0].group; ext4_alloc_group_tables() 269 if (src_group >= group_data[0].group + flex_gd->count) ext4_alloc_group_tables() 272 last_blk = start_blk + group_data[src_group - group].blocks_count; ext4_alloc_group_tables() 283 last_blk += group_data[src_group - group].blocks_count; ext4_alloc_group_tables() 293 group = ext4_get_group_number(sb, start_blk - 1); ext4_alloc_group_tables() 294 group -= group_data[0].group; ext4_alloc_group_tables() 295 group_data[group].free_blocks_count--; ext4_alloc_group_tables() 296 flex_gd->bg_flags[group] &= uninit_mask; ext4_alloc_group_tables() 304 group = ext4_get_group_number(sb, start_blk - 1); ext4_alloc_group_tables() 305 group -= group_data[0].group; ext4_alloc_group_tables() 306 group_data[group].free_blocks_count--; ext4_alloc_group_tables() 307 flex_gd->bg_flags[group] &= uninit_mask; ext4_alloc_group_tables() 318 group = ext4_get_group_number(sb, start_blk); ext4_alloc_group_tables() 319 next_group_start = ext4_group_first_block_no(sb, group + 1); ext4_alloc_group_tables() 320 group -= group_data[0].group; ext4_alloc_group_tables() 323 flex_gd->bg_flags[group + 1] &= uninit_mask; ext4_alloc_group_tables() 325 group_data[group + 1].free_blocks_count -= overhead; ext4_alloc_group_tables() 329 group_data[group].free_blocks_count -= itb; ext4_alloc_group_tables() 330 flex_gd->bg_flags[group] &= uninit_mask; ext4_alloc_group_tables() 336 group = group_data[0].group; ext4_alloc_group_tables() 338 printk(KERN_DEBUG "EXT4-fs: adding a flex group with " ext4_alloc_group_tables() 343 printk(KERN_DEBUG "adding %s group %u: %u " ext4_alloc_group_tables() 345 ext4_bg_has_super(sb, group + i) ? 
"normal" : ext4_alloc_group_tables() 346 "no-super", group + i, ext4_alloc_group_tables() 406 * @flex_gd: flex group data 418 ext4_group_t group; set_flexbg_block_bitmap() local 421 group = ext4_get_group_number(sb, block); set_flexbg_block_bitmap() 422 start = ext4_group_first_block_no(sb, group); set_flexbg_block_bitmap() 423 group -= flex_gd->groups[0].group; set_flexbg_block_bitmap() 429 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) { set_flexbg_block_bitmap() 438 bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap); set_flexbg_block_bitmap() 466 * setup_new_flex_group_blocks handles a flex group as follow: 467 * 1. copy super block and GDT, and initialize group tables if necessary. 470 * 2. allocate group tables in block bitmaps, that is, set bits in block 471 * bitmap for blocks taken by group tables. 484 ext4_group_t group, count; setup_new_flex_group_blocks() local 490 group_data[0].group != sbi->s_groups_count); setup_new_flex_group_blocks() 500 group = group_data[0].group; setup_new_flex_group_blocks() 501 for (i = 0; i < flex_gd->count; i++, group++) { setup_new_flex_group_blocks() 505 gdblocks = ext4_bg_num_gdb(sb, group); setup_new_flex_group_blocks() 506 start = ext4_group_first_block_no(sb, group); setup_new_flex_group_blocks() 508 if (meta_bg == 0 && !ext4_bg_has_super(sb, group)) setup_new_flex_group_blocks() 513 first_group = ext4_meta_bg_first_group(sb, group); setup_new_flex_group_blocks() 514 if (first_group != group + 1 && setup_new_flex_group_blocks() 515 first_group != group + EXT4_DESC_PER_BLOCK(sb) - 1) setup_new_flex_group_blocks() 519 block = start + ext4_bg_has_super(sb, group); setup_new_flex_group_blocks() 520 /* Copy all of the GDT blocks into the backup in this group */ setup_new_flex_group_blocks() 524 ext4_debug("update backup group %#04llx\n", block); setup_new_flex_group_blocks() 553 /* Zero out all of the reserved backup group descriptor setup_new_flex_group_blocks() 556 if (ext4_bg_has_super(sb, group)) { setup_new_flex_group_blocks() 564 /* Initialize group tables of the grop @group */ setup_new_flex_group_blocks() 581 /* Initialize block bitmap of the @group */ setup_new_flex_group_blocks() 593 overhead = ext4_group_overhead_blocks(sb, group); setup_new_flex_group_blocks() 610 /* Initialize inode bitmap of the @group */ setup_new_flex_group_blocks() 632 /* Mark group tables in block bitmap */ setup_new_flex_group_blocks() 674 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ... 707 * It is assumed that they are stored in group order. Returns the number of 741 * Called when we need to bring a reserved group descriptor table block into 745 * block, in group order. Even though we know all the block numbers we need, 754 ext4_group_t group) add_new_gdb() 758 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); add_new_gdb() 770 "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", add_new_gdb() 777 gdbackups = verify_reserved_gdb(sb, group, gdb_bh); add_new_gdb() 792 ext4_warning(sb, "new group %u GDT block %llu not reserved", add_new_gdb() 793 group, gdblock); add_new_gdb() 884 handle_t *handle, ext4_group_t group) { add_new_gdb_meta_bg() 888 unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb); add_new_gdb_meta_bg() 891 gdblock = ext4_meta_bg_first_block_no(sb, group) + add_new_gdb_meta_bg() 892 ext4_bg_has_super(sb, group); add_new_gdb_meta_bg() 921 * Called when we are adding a new group which has a backup copy of each of 922 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. 
934 ext4_group_t group) reserve_backup_gdb() 978 gdbackups = verify_reserved_gdb(sb, group, primary[res]); reserve_backup_gdb() 999 * the new group to its reserved primary GDT block. reserve_backup_gdb() 1001 blk = group * EXT4_BLOCKS_PER_GROUP(sb); reserve_backup_gdb() 1034 * superblocks, and the location of the new group metadata in the GDT backups. 1052 ext4_group_t group = 0; update_backups() local 1059 group = 1; update_backups() 1065 group = ext4_list_backups(sb, &three, &five, &seven); update_backups() 1068 group = ext4_meta_bg_first_group(sb, group) + 1; update_backups() 1069 last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2); update_backups() 1072 while (group < sbi->s_groups_count) { update_backups() 1084 backup_block = ((ext4_fsblk_t)group) * bpg + blk_off; update_backups() 1086 backup_block = (ext4_group_first_block_no(sb, group) + update_backups() 1087 ext4_bg_has_super(sb, group)); update_backups() 1096 ext4_group_first_block_no(sb, group)); update_backups() 1112 group = ext4_list_backups(sb, &three, &five, &seven); update_backups() 1113 else if (group == last) update_backups() 1116 group = last; update_backups() 1133 ext4_warning(sb, "can't update backup for group %u (err %d), " update_backups() 1134 "forcing fsck on next reboot", group, err); update_backups() 1142 * ext4_add_new_descs() adds @count group descriptor of groups 1143 * starting at @group 1147 * @group: the group no. of the first group desc to be added 1149 * @count: number of group descriptors to be added 1152 ext4_group_t group, struct inode *resize_inode, ext4_add_new_descs() 1162 for (i = 0; i < count; i++, group++) { ext4_add_new_descs() 1163 int reserved_gdb = ext4_bg_has_super(sb, group) ? ext4_add_new_descs() 1166 gdb_off = group % EXT4_DESC_PER_BLOCK(sb); ext4_add_new_descs() 1167 gdb_num = group / EXT4_DESC_PER_BLOCK(sb); ext4_add_new_descs() 1170 * We will only either add reserved group blocks to a backup group ext4_add_new_descs() 1171 * or remove reserved blocks for the first group in a new group block. 
ext4_add_new_descs() 1180 if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group)) ext4_add_new_descs() 1181 err = reserve_backup_gdb(handle, resize_inode, group); ext4_add_new_descs() 1183 err = add_new_gdb_meta_bg(sb, handle, group); ext4_add_new_descs() 1185 err = add_new_gdb(handle, resize_inode, group); ext4_add_new_descs() 1209 ext4_group_t group, ext4_set_bitmap_checksums() 1221 ext4_inode_bitmap_csum_set(sb, group, gdp, bh, ext4_set_bitmap_checksums() 1228 ext4_block_bitmap_csum_set(sb, group, gdp, bh); ext4_set_bitmap_checksums() 1235 * ext4_setup_new_descs() will set up the group descriptors of a flex bg 1244 ext4_group_t group; ext4_setup_new_descs() local 1250 group = group_data->group; ext4_setup_new_descs() 1252 gdb_off = group % EXT4_DESC_PER_BLOCK(sb); ext4_setup_new_descs() 1253 gdb_num = group / EXT4_DESC_PER_BLOCK(sb); ext4_setup_new_descs() 1259 /* Update group descriptor block for new group */ ext4_setup_new_descs() 1266 err = ext4_set_bitmap_checksums(sb, group, gdp, group_data); ext4_setup_new_descs() 1280 ext4_group_desc_csum_set(sb, group, gdp); ext4_setup_new_descs() 1289 * We can allocate memory for mb_alloc based on the new group ext4_setup_new_descs() 1292 err = ext4_mb_add_groupinfo(sb, group, gdp); ext4_setup_new_descs() 1320 * increasing the group count so that once the group is enabled, ext4_update_super() 1323 * We always allocate group-by-group, then block-by-block or ext4_update_super() 1324 * inode-by-inode within a group, so enabling these ext4_update_super() 1325 * blocks/inodes before the group is live won't actually let us ext4_update_super() 1358 * NB. These rules can be relaxed when checking the group count ext4_update_super() 1360 * group after serialising against the group count, and we can ext4_update_super() 1371 /* Update the reserved block counts only once the new group is ext4_update_super() 1388 flex_group = ext4_flex_group(sbi, group_data[0].group); ext4_update_super() 1401 printk(KERN_DEBUG "EXT4-fs: added group %u:" ext4_update_super() 1406 /* Add a flex group to an fs. Ensure we handle all possible error conditions 1418 ext4_group_t group; ext4_flex_group_add() local 1427 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); ext4_flex_group_add() 1435 * blocks. If we are adding a group past the last current GDT block, ext4_flex_group_add() 1437 * are adding a group with superblock/GDT backups we will also ext4_flex_group_add() 1455 group = flex_gd->groups[0].group; ext4_flex_group_add() 1456 BUG_ON(group != EXT4_SB(sb)->s_groups_count); ext4_flex_group_add() 1457 err = ext4_add_new_descs(handle, sb, group, ext4_flex_group_add() 1476 int gdb_num = group / EXT4_DESC_PER_BLOCK(sb); ext4_flex_group_add() 1477 int gdb_num_end = ((group + flex_gd->count - 1) / ext4_flex_group_add() 1509 ext4_group_t group; ext4_setup_next_flex_gd() local 1522 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); ext4_setup_next_flex_gd() 1526 last_group = group | (flexbg_size - 1); ext4_setup_next_flex_gd() 1530 flex_gd->count = last_group - group + 1; ext4_setup_next_flex_gd() 1535 group_data[i].group = group + i; ext4_setup_next_flex_gd() 1537 overhead = ext4_group_overhead_blocks(sb, group + i); ext4_setup_next_flex_gd() 1549 /* We need to initialize block bitmap of last group. */ ext4_setup_next_flex_gd() 1561 /* Add group descriptor data to an existing or new group descriptor block. 1567 * Otherwise, we may need to add backup GDT blocks for a sparse group. 1570 * in the new group's counts to the superblock. 
Prior to that we have 1571 * not really "added" the group at all. We re-check that we are still 1572 * adding in the last group in case things have changed since verifying. 1579 int reserved_gdb = ext4_bg_has_super(sb, input->group) ? ext4_group_add() 1586 gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb); ext4_group_add() 1626 err = ext4_alloc_flex_bg_array(sb, input->group + 1); ext4_group_add() 1630 err = ext4_mb_alloc_groupinfo(sb, input->group + 1); ext4_group_add() 1644 * extend a group without checking assuming that checking has been done. 1654 * one group descriptor via ext4_group_add_blocks(). ext4_group_extend_no_check() 1674 /* We add the blocks to the bitmap and set the group need init bit */ ext4_group_extend_no_check() 1688 printk(KERN_DEBUG "EXT4-fs: extended group to %llu " ext4_group_extend_no_check() 1699 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>" 1714 ext4_group_t group; ext4_group_extend() local 1720 "extending last group from %llu to %llu blocks", ext4_group_extend() 1740 /* Handle the remaining blocks in the last group only. */ ext4_group_extend() 1741 ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last); ext4_group_extend() 1759 ext4_warning(sb, "will only finish group (%llu blocks, %u new)", ext4_group_extend() 1936 n_group--; /* set to last group number */ ext4_resize_fs() 1962 /* extend the last group */ ext4_resize_fs() 1990 /* Add flex groups. Note that a regular group is a ext4_resize_fs() 1991 * flex group with 1 group. ext4_resize_fs() 60 ext4_meta_bg_first_group(struct super_block *sb, ext4_group_t group) ext4_meta_bg_first_group() argument 66 ext4_meta_bg_first_block_no(struct super_block *sb, ext4_group_t group) ext4_meta_bg_first_block_no() argument 72 ext4_group_overhead_blocks(struct super_block *sb, ext4_group_t group) ext4_group_overhead_blocks() argument 753 add_new_gdb(handle_t *handle, struct inode *inode, ext4_group_t group) add_new_gdb() argument 883 add_new_gdb_meta_bg(struct super_block *sb, handle_t *handle, ext4_group_t group) add_new_gdb_meta_bg() argument 933 reserve_backup_gdb(handle_t *handle, struct inode *inode, ext4_group_t group) reserve_backup_gdb() argument 1151 ext4_add_new_descs(handle_t *handle, struct super_block *sb, ext4_group_t group, struct inode *resize_inode, ext4_group_t count) ext4_add_new_descs() argument 1208 ext4_set_bitmap_checksums(struct super_block *sb, ext4_group_t group, struct ext4_group_desc *gdp, struct ext4_new_group_data *group_data) ext4_set_bitmap_checksums() argument
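Much of the resize math above is one rounding trick repeated: with N = EXT4_DESC_PER_BLOCK(sb) descriptors per block, a group's descriptor lives in descriptor block group/N at slot group%N (the gdb_num/gdb_off pairs), and the first group of a meta_bg is the group number rounded down to a multiple of N. A sketch with an assumed N = 128:

#include <stdio.h>

#define DESC_PER_BLOCK_BITS 7                    /* assumed: 128 per block */
#define DESC_PER_BLOCK      (1u << DESC_PER_BLOCK_BITS)

/* First group of the meta block group containing @group
 * (cf. ext4_meta_bg_first_group()). */
unsigned int meta_bg_first_group(unsigned int group)
{
    return (group >> DESC_PER_BLOCK_BITS) << DESC_PER_BLOCK_BITS;
}

/* Descriptor block number and slot for @group: the gdb_num/gdb_off
 * pair computed repeatedly in the resize paths above. */
void desc_location(unsigned int group, unsigned int *gdb_num,
                   unsigned int *gdb_off)
{
    *gdb_num = group / DESC_PER_BLOCK;
    *gdb_off = group % DESC_PER_BLOCK;
}

int main(void)
{
    unsigned int num, off;

    desc_location(300, &num, &off);
    printf("group 300: meta_bg starts at group %u, descriptor block %u slot %u\n",
           meta_bg_first_group(300), num, off);
    return 0;
}

With N = 128, group 300 belongs to the meta_bg starting at group 256 and is described by slot 44 of descriptor block 2.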
|
H A D | ialloc.c | 39 * block groups. Each group contains 1 bitmap block for blocks, 1 bitmap 42 * The file system contains group descriptors which are located after the 77 * allocation, essentially implementing a per-group read-only flag. */ ext4_init_inode_bitmap() 79 ext4_error(sb, "Checksum bad for group %u", block_group); ext4_init_inode_bitmap() 362 * for a particular block group or flex_bg. If flex_size is 1, then g 363 * is a block group number; otherwise it is flex_bg number. 397 * Otherwise we simply return a random group. 401 * It's OK to put a directory into a group unless 405 * Parent's group is preferred, if it doesn't satisfy these 407 * of the groups look good we just look for a group with more 408 * free inodes than average (starting at parent's group). 412 ext4_group_t *group, umode_t mode, find_group_orlov() 478 *group = grp; find_group_orlov() 486 * start at 2nd block group of the flexgroup. See find_group_orlov() 495 *group = grp+i; find_group_orlov() 509 * Start looking in the flex group where we last allocated an find_group_orlov() 541 *group = grp; find_group_orlov() 560 ext4_group_t *group, umode_t mode) find_group_other() 568 * Try to place the inode in the same flex group as its find_group_other() 570 * find another flex group, and store that information in the find_group_other() 572 * group for future allocations. find_group_other() 585 *group = i; find_group_other() 596 * to find a new flex group; we pass in the mode to find_group_other() 599 *group = parent_group + flex_size; find_group_other() 600 if (*group > ngroups) find_group_other() 601 *group = 0; find_group_other() 602 return find_group_orlov(sb, parent, group, mode, NULL); find_group_other() 608 *group = parent_group; find_group_other() 609 desc = ext4_get_group_desc(sb, *group, NULL); find_group_other() 623 *group = (*group + parent->i_ino) % ngroups; find_group_other() 626 * Use a quadratic hash to find a group with a free inode and some free find_group_other() 630 *group += i; find_group_other() 631 if (*group >= ngroups) find_group_other() 632 *group -= ngroups; find_group_other() 633 desc = ext4_get_group_desc(sb, *group, NULL); find_group_other() 640 * That failed: try linear search for a free inode, even if that group find_group_other() 643 *group = parent_group; find_group_other() 645 if (++*group >= ngroups) find_group_other() 646 *group = 0; find_group_other() 647 desc = ext4_get_group_desc(sb, *group, NULL); find_group_other() 664 static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino) recently_deleted() argument 673 gdp = ext4_get_group_desc(sb, group, NULL); recently_deleted() 702 * a directory, then a forward search is made for a block group with both 704 * the groups with above-average free space, that group with the fewest 708 * group to find a free inode. 
718 ext4_group_t ngroups, group = 0; __ext4_new_inode() local 764 group = (goal - 1) / EXT4_INODES_PER_GROUP(sb); __ext4_new_inode() 771 ret2 = find_group_orlov(sb, dir, &group, mode, qstr); __ext4_new_inode() 773 ret2 = find_group_other(sb, dir, &group, mode); __ext4_new_inode() 776 EXT4_I(dir)->i_last_alloc_group = group; __ext4_new_inode() 783 * unless we get unlucky and it turns out the group we selected __ext4_new_inode() 789 gdp = ext4_get_group_desc(sb, group, &group_desc_bh); __ext4_new_inode() 797 if (++group == ngroups) __ext4_new_inode() 798 group = 0; __ext4_new_inode() 802 grp = ext4_get_group_info(sb, group); __ext4_new_inode() 805 if (++group == ngroups) __ext4_new_inode() 806 group = 0; __ext4_new_inode() 811 inode_bitmap_bh = ext4_read_inode_bitmap(sb, group); __ext4_new_inode() 814 if (++group == ngroups) __ext4_new_inode() 815 group = 0; __ext4_new_inode() 825 if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) { __ext4_new_inode() 831 recently_deleted(sb, group, ino)) { __ext4_new_inode() 852 ext4_lock_group(sb, group); __ext4_new_inode() 854 ext4_unlock_group(sb, group); __ext4_new_inode() 862 if (++group == ngroups) __ext4_new_inode() 863 group = 0; __ext4_new_inode() 888 block_bitmap_bh = ext4_read_block_bitmap(sb, group); __ext4_new_inode() 905 ext4_lock_group(sb, group); __ext4_new_inode() 909 ext4_free_clusters_after_init(sb, group, gdp)); __ext4_new_inode() 910 ext4_block_bitmap_csum_set(sb, group, gdp, __ext4_new_inode() 912 ext4_group_desc_csum_set(sb, group, gdp); __ext4_new_inode() 914 ext4_unlock_group(sb, group); __ext4_new_inode() 926 struct ext4_group_info *grp = ext4_get_group_info(sb, group); __ext4_new_inode() 929 ext4_lock_group(sb, group); /* while we modify the bg desc */ __ext4_new_inode() 938 * relative inode number in this group. if it is greater __ext4_new_inode() 946 ext4_lock_group(sb, group); __ext4_new_inode() 953 ext4_group_t f = ext4_flex_group(sbi, group); __ext4_new_inode() 959 ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh, __ext4_new_inode() 961 ext4_group_desc_csum_set(sb, group, gdp); __ext4_new_inode() 963 ext4_unlock_group(sb, group); __ext4_new_inode() 977 flex_group = ext4_flex_group(sbi, group); __ext4_new_inode() 981 inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb); __ext4_new_inode() 996 ei->i_block_group = group; __ext4_new_inode() 1209 printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n", ext4_count_free_inodes() 1251 * inode allocation from the current group, so we take alloc_sem lock, to 1254 int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, ext4_init_inode_table() argument 1257 struct ext4_group_info *grp = ext4_get_group_info(sb, group); ext4_init_inode_table() 1271 gdp = ext4_get_group_desc(sb, group, &group_desc_bh); ext4_init_inode_table() 1300 ext4_error(sb, "Something is wrong with group %u: " ext4_init_inode_table() 1303 group, used_blks, ext4_init_inode_table() 1326 ext4_debug("going to zero out inode table in group %d\n", ext4_init_inode_table() 1327 group); ext4_init_inode_table() 1335 ext4_lock_group(sb, group); ext4_init_inode_table() 1337 ext4_group_desc_csum_set(sb, group, gdp); ext4_init_inode_table() 1338 ext4_unlock_group(sb, group); ext4_init_inode_table() 411 find_group_orlov(struct super_block *sb, struct inode *parent, ext4_group_t *group, umode_t mode, const struct qstr *qstr) find_group_orlov() argument 559 find_group_other(struct super_block *sb, struct inode *parent, ext4_group_t *group, umode_t mode) find_group_other() argument
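The fallback order in find_group_other() is easy to restate: start from the parent's group perturbed by the inode number, probe with doubling steps (the "quadratic hash"), then fall back to a linear scan. A sketch of that probe order, with a toy usability predicate standing in for the real free-inode/free-block descriptor checks:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the real check (free inodes and free blocks in
 * the group descriptor): here every third group counts as usable. */
static bool group_is_usable(unsigned int group)
{
    return group % 3 == 0;
}

/* Probe order of find_group_other()'s fallback path: hashed start,
 * doubling steps, then a linear scan; returns ~0u if nothing fits. */
unsigned int pick_group(unsigned int parent_group, unsigned long ino,
                        unsigned int ngroups)
{
    unsigned int group = (parent_group + (unsigned int)ino) % ngroups;

    for (unsigned int i = 1; i < ngroups; i <<= 1) {
        group += i;
        if (group >= ngroups)
            group -= ngroups;
        if (group_is_usable(group))
            return group;
    }

    group = parent_group;
    for (unsigned int i = 0; i < ngroups; i++) {
        if (++group >= ngroups)
            group = 0;
        if (group_is_usable(group))
            return group;
    }
    return ~0u;
}

int main(void)
{
    printf("%u\n", pick_group(7, 1234, 128));   /* 90 with this toy rule */
    return 0;
}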
|
H A D | mballoc.c | 52 * - track min/max extents in each group for better group selection 63 * group preallocation or inode preallocation depending on the size of 67 * select to use the group preallocation. The default value of 72 * The main motivation for having small file use group preallocation is to 95 * have the group allocation flag set then we look at the locality group 100 * The reason for having a per cpu locality group is to reduce the contention 103 * The locality group prealloc space is used looking at whether we have 106 * If we can't allocate blocks via inode prealloc or/and locality group 112 * each group is loaded via ext4_mb_load_buddy. The information involve 117 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 120 * one block each for bitmap and buddy information. So for each group we 137 * we are doing a group prealloc we try to normalize the request to 143 * stripe=<value> option the group prealloc request is normalized to the 159 * not, we search in the specific group using bitmap for best extents. The 164 * the group specified as the goal value in allocation context via 165 * ac_g_ex. Each group is first checked based on the criteria whether it 191 * - locality group 192 * assigned to specific locality group which does not translate to 193 * permanent set of inodes: inode can join and leave group. space 216 * - use locality group PA on-disk += N; PA -= N 217 * - discard locality group PA buddy -= PA; PA = 0 238 * until PA is linked to allocation group to avoid concurrent buddy init 244 * - use locality group PA 246 * - discard locality group PA 253 * - use locality group PA 255 * - discard locality group PA 262 * - use locality group PA 264 * - discard locality group PA 284 * load group 287 * release group 290 * find proper PA (per-inode or group) 291 * load group 293 * release group 297 * load group 299 * release group 301 * - discard preallocations in group: 305 * load group 306 * remove PA from object (inode or locality group) 316 * - bitlock on a group (group) 323 * group 330 * group 334 * group 337 * - discard all for given object (inode, locality group): 340 * group 342 * - discard all for given group: 343 * group 345 * group 366 ext4_group_t group); 368 ext4_group_t group); 514 "corruption in group %u " mb_cmp_bitmaps() 701 * group. 722 void *buddy, void *bitmap, ext4_group_t group) ext4_mb_generate_buddy() 724 struct ext4_group_info *grp = ext4_get_group_info(sb, group); ext4_mb_generate_buddy() 754 ext4_grp_locked_error(sb, group, 0, 0, ext4_mb_generate_buddy() 759 * If we intend to continue, we consider group descriptor ext4_mb_generate_buddy() 798 * for convenience. The information regarding each group 804 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 808 * So for each group we take up 2 blocks. 
A page can 813 * Locking note: This routine takes the block group lock of all groups 825 ext4_group_t first_group, group; ext4_mb_init_cache() local 861 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { ext4_mb_init_cache() 862 if (group >= ngroups) ext4_mb_init_cache() 865 grinfo = ext4_get_group_info(sb, group); ext4_mb_init_cache() 868 * which added some new uninitialized group info structs, so ext4_mb_init_cache() 876 if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) { ext4_mb_init_cache() 880 mb_debug(1, "read bitmap for group %u\n", group); ext4_mb_init_cache() 884 for (i = 0, group = first_group; i < groups_per_page; i++, group++) { ext4_mb_init_cache() 885 if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) { ext4_mb_init_cache() 893 group = (first_block + i) >> 1; ext4_mb_init_cache() 894 if (group >= ngroups) ext4_mb_init_cache() 897 if (!bh[group - first_group]) ext4_mb_init_cache() 903 * particular group in the format specified ext4_mb_init_cache() 908 bitmap = bh[group - first_group]->b_data; ext4_mb_init_cache() 917 mb_debug(1, "put buddy for group %u in page %lu/%x\n", ext4_mb_init_cache() 918 group, page->index, i * blocksize); ext4_mb_init_cache() 919 trace_ext4_mb_buddy_bitmap_load(sb, group); ext4_mb_init_cache() 920 grinfo = ext4_get_group_info(sb, group); ext4_mb_init_cache() 926 * incore got set to the group block bitmap below ext4_mb_init_cache() 928 ext4_lock_group(sb, group); ext4_mb_init_cache() 931 ext4_mb_generate_buddy(sb, data, incore, group); ext4_mb_init_cache() 932 ext4_unlock_group(sb, group); ext4_mb_init_cache() 937 mb_debug(1, "put bitmap for group %u in page %lu/%x\n", ext4_mb_init_cache() 938 group, page->index, i * blocksize); ext4_mb_init_cache() 939 trace_ext4_mb_bitmap_load(sb, group); ext4_mb_init_cache() 942 ext4_lock_group(sb, group); ext4_mb_init_cache() 946 ext4_mb_generate_from_pa(sb, data, group); ext4_mb_init_cache() 947 ext4_mb_generate_from_freelist(sb, data, group); ext4_mb_init_cache() 948 ext4_unlock_group(sb, group); ext4_mb_init_cache() 975 ext4_group_t group, struct ext4_buddy *e4b) ext4_mb_get_buddy_page_lock() 989 * So for each group we need two blocks. ext4_mb_get_buddy_page_lock() 991 block = group * 2; ext4_mb_get_buddy_page_lock() 1030 * block group lock of all groups for this page; do not hold the BG lock when 1034 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) ext4_mb_init_group() argument 1043 mb_debug(1, "init group %u\n", group); ext4_mb_init_group() 1044 this_grp = ext4_get_group_info(sb, group); ext4_mb_init_group() 1047 * page which map to the group from which we are already ext4_mb_init_group() 1054 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); ext4_mb_init_group() 1057 * somebody initialized the group ext4_mb_init_group() 1097 * block group lock of all groups for this page; do not hold the BG lock when 1101 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, ext4_mb_load_buddy() argument 1115 mb_debug(1, "load group %u\n", group); ext4_mb_load_buddy() 1118 grp = ext4_get_group_info(sb, group); ext4_mb_load_buddy() 1123 e4b->bd_group = group; ext4_mb_load_buddy() 1129 * we need full data about the group ext4_mb_load_buddy() 1132 ret = ext4_mb_init_group(sb, group); ext4_mb_load_buddy() 1140 * So for each group we need two blocks. ext4_mb_load_buddy() 1142 block = group * 2; ext4_mb_load_buddy() 1365 * Releasing entire group is all about clearing mb_buddy_mark_free() 1421 /* Don't bother if the block group is corrupt. 
*/ mb_free_blocks() 1456 /* Mark the block group as corrupt. */ mb_free_blocks() 1621 * Must be called under group lock! 1647 * group until we update the bitmap. That would mean we ext4_mb_use_best_found() 1782 ext4_group_t group = ex.fe_group; ext4_mb_try_best_found() local 1787 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); ext4_mb_try_best_found() 1791 ext4_lock_group(ac->ac_sb, group); ext4_mb_try_best_found() 1799 ext4_unlock_group(ac->ac_sb, group); ext4_mb_try_best_found() 1809 ext4_group_t group = ac->ac_g_ex.fe_group; ext4_mb_find_by_goal() local 1813 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); ext4_mb_find_by_goal() 1821 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); ext4_mb_find_by_goal() 1830 ext4_lock_group(ac->ac_sb, group); ext4_mb_find_by_goal() 1863 ext4_unlock_group(ac->ac_sb, group); ext4_mb_find_by_goal() 1913 * The routine scans the group and measures all found extents. 1915 * free blocks in the group, so the routine can know upper limit. 1938 * free blocks even though group info says we ext4_mb_complex_scan_group() 1943 "group info. But bitmap says 0", ext4_mb_complex_scan_group() 1953 "group info. But got %d blocks", ext4_mb_complex_scan_group() 1991 /* find first stripe-aligned block in group */ ext4_mb_scan_aligned() 2015 ext4_group_t group, int cr) ext4_mb_good_group() 2019 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); ext4_mb_good_group() 2034 int ret = ext4_mb_init_group(ac->ac_sb, group); ext4_mb_good_group() 2050 ((group % flex_size) == 0)) ext4_mb_good_group() 2081 ext4_group_t ngroups, group, i; ext4_mb_regular_allocator() local 2144 * searching for the right group start ext4_mb_regular_allocator() 2147 group = ac->ac_g_ex.fe_group; ext4_mb_regular_allocator() 2149 for (i = 0; i < ngroups; group++, i++) { ext4_mb_regular_allocator() 2153 * files makes group > ngroups possible on first loop. ext4_mb_regular_allocator() 2155 if (group >= ngroups) ext4_mb_regular_allocator() 2156 group = 0; ext4_mb_regular_allocator() 2159 if (!ext4_mb_good_group(ac, group, cr)) ext4_mb_regular_allocator() 2162 err = ext4_mb_load_buddy(sb, group, &e4b); ext4_mb_regular_allocator() 2166 ext4_lock_group(sb, group); ext4_mb_regular_allocator() 2170 * block group ext4_mb_regular_allocator() 2172 if (!ext4_mb_good_group(ac, group, cr)) { ext4_mb_regular_allocator() 2173 ext4_unlock_group(sb, group); ext4_mb_regular_allocator() 2187 ext4_unlock_group(sb, group); ext4_mb_regular_allocator() 2227 ext4_group_t group; ext4_mb_seq_groups_start() local 2231 group = *pos + 1; ext4_mb_seq_groups_start() 2232 return (void *) ((unsigned long) group); ext4_mb_seq_groups_start() 2238 ext4_group_t group; ext4_mb_seq_groups_next() local 2243 group = *pos + 1; ext4_mb_seq_groups_next() 2244 return (void *) ((unsigned long) group); ext4_mb_seq_groups_next() 2250 ext4_group_t group = (ext4_group_t) ((unsigned long) v); ext4_mb_seq_groups_show() local 2260 group--; ext4_mb_seq_groups_show() 2261 if (group == 0) ext4_mb_seq_groups_show() 2265 "group", "free", "frags", "first", ext4_mb_seq_groups_show() 2271 grinfo = ext4_get_group_info(sb, group); ext4_mb_seq_groups_show() 2272 /* Load the group info in memory only if not already loaded. 
*/ ext4_mb_seq_groups_show() 2274 err = ext4_mb_load_buddy(sb, group, &e4b); ext4_mb_seq_groups_show() 2276 seq_printf(seq, "#%-5u: I/O error\n", group); ext4_mb_seq_groups_show() 2282 memcpy(&sg, ext4_get_group_info(sb, group), i); ext4_mb_seq_groups_show() 2287 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, ext4_mb_seq_groups_show() 2357 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); ext4_mb_alloc_groupinfo() 2372 /* Create and initialize ext4_group_info data for the given group. */ ext4_mb_add_groupinfo() 2373 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, ext4_mb_add_groupinfo() argument 2383 * First check if this group is the first of a reserved block. ext4_mb_add_groupinfo() 2387 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { ext4_mb_add_groupinfo() 2393 "for a buddy group"); ext4_mb_add_groupinfo() 2396 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = ext4_mb_add_groupinfo() 2401 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; ext4_mb_add_groupinfo() 2402 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); ext4_mb_add_groupinfo() 2418 ext4_free_clusters_after_init(sb, group, desc); ext4_mb_add_groupinfo() 2435 bh = ext4_read_block_bitmap(sb, group); ext4_mb_add_groupinfo() 2447 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { ext4_mb_add_groupinfo() 2448 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]); ext4_mb_add_groupinfo() 2449 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL; ext4_mb_add_groupinfo() 2607 * The default group preallocation is 512, which for 4k block ext4_mb_init() 2610 * is 1 megabyte, then group preallocation size becomes half a ext4_mb_init() 2612 * group pralloc size for cluster sizes up to 64k, and after ext4_mb_init() 2613 * that, we will force a minimum group preallocation size of ext4_mb_init() 2669 /* need to called with the ext4 group lock held */ ext4_mb_cleanup_pa() 2775 mb_debug(1, "gonna free %u blocks in group %u (0x%p):", ext4_free_data_callback() 2784 " group:%d block:%d count:%d failed" ext4_free_data_callback() 2800 /* Take it out of per group rb tree */ ext4_free_data_callback() 2805 * Clear the trimmed flag for the group so that the next ext4_free_data_callback() 2814 /* No more items in the per group rb tree ext4_free_data_callback() 2902 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, ext4_mb_mark_diskspace_used() 2981 * here we normalize request for locality group 2987 * XXX: should we try to preallocate more than the group has now? 2996 mb_debug(1, "#%u: goal %u blocks for locality group\n", ext4_mb_normalize_group_request() 3282 * use blocks preallocated to locality group 3297 * possible race when the group is being loaded concurrently ext4_mb_use_group_pa() 3302 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa); ext4_mb_use_group_pa() 3382 /* can we use group allocation? 
*/ ext4_mb_use_preallocated() 3386 /* inode may have no locality group for some reason */ ext4_mb_use_preallocated() 3424 * the function goes through all block freed in the group 3427 * Need to be called with the ext4 group lock held 3430 ext4_group_t group) ext4_mb_generate_from_freelist() 3436 grp = ext4_get_group_info(sb, group); ext4_mb_generate_from_freelist() 3448 * the function goes through all preallocation in this group and marks them 3450 * Need to be called with ext4 group lock held 3454 ext4_group_t group) ext4_mb_generate_from_pa() 3456 struct ext4_group_info *grp = ext4_get_group_info(sb, group); ext4_mb_generate_from_pa() 3464 /* all form of preallocation discards first load group, ext4_mb_generate_from_pa() 3481 BUG_ON(groupnr != group); ext4_mb_generate_from_pa() 3485 mb_debug(1, "prellocated %u for group %u\n", preallocated, group); ext4_mb_generate_from_pa() 3525 * If doing group-based preallocation, pa_pstart may be in the ext4_mb_put_pa() 3526 * next group when pa is used up ext4_mb_put_pa() 3540 * drop PA from group ext4_mb_put_pa() 3653 * creates new preallocated space for locality group inodes belongs to 3688 mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa, ext4_mb_new_group_pa() 3727 * @pa must be unlinked from inode and group lists, so that 3729 * the caller MUST hold group/inode locks. 3740 ext4_group_t group; ext4_mb_release_inode_pa() local 3747 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); ext4_mb_release_inode_pa() 3749 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); ext4_mb_release_inode_pa() 3757 mb_debug(1, " free preallocated %u/%u in group %u\n", ext4_mb_release_inode_pa() 3758 (unsigned) ext4_group_first_block_no(sb, group) + bit, ext4_mb_release_inode_pa() 3759 (unsigned) next - bit, (unsigned) group); ext4_mb_release_inode_pa() 3762 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); ext4_mb_release_inode_pa() 3775 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", ext4_mb_release_inode_pa() 3792 ext4_group_t group; ext4_mb_release_group_pa() local 3797 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); ext4_mb_release_group_pa() 3798 BUG_ON(group != e4b->bd_group && pa->pa_len != 0); ext4_mb_release_group_pa() 3801 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); ext4_mb_release_group_pa() 3807 * releases all preallocations in given group 3817 ext4_group_t group, int needed) ext4_mb_discard_group_preallocations() 3819 struct ext4_group_info *grp = ext4_get_group_info(sb, group); ext4_mb_discard_group_preallocations() 3828 mb_debug(1, "discard preallocation for group %u\n", group); ext4_mb_discard_group_preallocations() 3833 bitmap_bh = ext4_read_block_bitmap(sb, group); ext4_mb_discard_group_preallocations() 3835 ext4_error(sb, "Error reading block bitmap for %u", group); ext4_mb_discard_group_preallocations() 3839 err = ext4_mb_load_buddy(sb, group, &e4b); ext4_mb_discard_group_preallocations() 3841 ext4_error(sb, "Error loading buddy information for %u", group); ext4_mb_discard_group_preallocations() 3851 ext4_lock_group(sb, group); ext4_mb_discard_group_preallocations() 3880 ext4_unlock_group(sb, group); ext4_mb_discard_group_preallocations() 3894 /* remove from object (inode or locality group) */ ext4_mb_discard_group_preallocations() 3909 ext4_unlock_group(sb, group); ext4_mb_discard_group_preallocations() 3930 ext4_group_t group = 0; ext4_discard_preallocations() local 3996 group = ext4_get_group_number(sb, pa->pa_pstart); ext4_discard_preallocations() 3998 err = 
ext4_mb_load_buddy(sb, group, &e4b); ext4_discard_preallocations() 4001 group); ext4_discard_preallocations() 4005 bitmap_bh = ext4_read_block_bitmap(sb, group); ext4_discard_preallocations() 4008 group); ext4_discard_preallocations() 4013 ext4_lock_group(sb, group); ext4_discard_preallocations() 4016 ext4_unlock_group(sb, group); ext4_discard_preallocations() 4092 * We use locality group preallocation for small size file. The size of the 4126 /* don't use group allocation for large files */ ext4_mb_group_or_file() 4135 * locality group prealloc space are per cpu. The reason for having ext4_mb_group_or_file() 4136 * per cpu locality group is to reduce the contention between block ext4_mb_group_or_file() 4141 /* we're going to use group allocation */ ext4_mb_group_or_file() 4144 /* serialize all allocations in the group */ ext4_mb_group_or_file() 4155 ext4_group_t group; ext4_mb_initialize_context() local 4160 /* we can't allocate > group size */ ext4_mb_initialize_context() 4172 ext4_get_group_no_and_offset(sb, goal, &group, &block); ext4_mb_initialize_context() 4180 ac->ac_o_ex.fe_group = group; ext4_mb_initialize_context() 4187 * locality group. this is a policy, actually */ ext4_mb_initialize_context() 4206 ext4_group_t group = 0; ext4_mb_discard_lg_preallocations() local 4211 mb_debug(1, "discard locality group preallocation\n"); ext4_mb_discard_lg_preallocations() 4257 group = ext4_get_group_number(sb, pa->pa_pstart); ext4_mb_discard_lg_preallocations() 4258 if (ext4_mb_load_buddy(sb, group, &e4b)) { ext4_mb_discard_lg_preallocations() 4260 group); ext4_mb_discard_lg_preallocations() 4263 ext4_lock_group(sb, group); ext4_mb_discard_lg_preallocations() 4266 ext4_unlock_group(sb, group); ext4_mb_discard_lg_preallocations() 4540 * AND the blocks are associated with the same group. 4556 ext4_group_t group = e4b->bd_group; ext4_mb_free_metadata() local 4589 ext4_grp_locked_error(sb, group, 0, ext4_mb_free_metadata() 4590 ext4_group_first_block_no(sb, group) + ext4_mb_free_metadata() 4742 * Check to see if we are freeing blocks across a group ext4_free_blocks() 4823 * with group lock held. generate_buddy look at ext4_free_blocks() 4824 * them with group lock_held ext4_free_blocks() 4830 " group:%d block:%d count:%lu failed" ext4_free_blocks() 4863 /* And the group descriptor block */ ext4_free_blocks() 4864 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); ext4_free_blocks() 4882 * ext4_group_add_blocks() -- Add given blocks to an existing group 4885 * @block: start physical block to add to the block group 4911 * Check to see if we are freeing blocks across a group ext4_group_add_blocks() 4915 ext4_warning(sb, "too much blocks added to group %u\n", ext4_group_add_blocks() 4977 * with group lock held. generate_buddy look at ext4_group_add_blocks() 4978 * them with group lock_held ext4_group_add_blocks() 5003 /* And the group descriptor block */ ext4_group_add_blocks() 5004 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); ext4_group_add_blocks() 5016 * ext4_trim_extent -- function to TRIM one single free extent in the group 5018 * @start: starting block of the free extent in the alloc. group 5020 * @group: alloc. group we are working with 5021 * @e4b: ext4 buddy for the group 5023 * Trim "count" blocks starting at "start" in the "group". To assure that no 5025 * be called with under the group lock. 
5028 ext4_group_t group, struct ext4_buddy *e4b) __releases() 5035 trace_ext4_trim_extent(sb, group, start, count); __releases() 5037 assert_spin_locked(ext4_group_lock_ptr(sb, group)); __releases() 5040 ex.fe_group = group; __releases() 5048 ext4_unlock_group(sb, group); __releases() 5049 ret = ext4_issue_discard(sb, group, start, count); __releases() 5050 ext4_lock_group(sb, group); __releases() 5056 * ext4_trim_all_free -- function to trim all free space in alloc. group 5058 * @group: group to be trimmed 5059 * @start: first group block to examine 5060 * @max: last group block to examine 5063 * ext4_trim_all_free walks through group's buddy bitmap searching for free 5068 * ext4_trim_all_free walks through group's block bitmap searching for free 5069 * extents. When the free extent is found, mark it as used in group buddy 5071 * the group buddy bitmap. This is done until whole group is scanned. 5074 ext4_trim_all_free(struct super_block *sb, ext4_group_t group, ext4_trim_all_free() argument 5083 trace_ext4_trim_all_free(sb, group, start, max); ext4_trim_all_free() 5085 ret = ext4_mb_load_buddy(sb, group, &e4b); ext4_trim_all_free() 5088 "information for %u", group); ext4_trim_all_free() 5093 ext4_lock_group(sb, group); ext4_trim_all_free() 5109 next - start, group, &e4b); ext4_trim_all_free() 5124 ext4_unlock_group(sb, group); ext4_trim_all_free() 5126 ext4_lock_group(sb, group); ext4_trim_all_free() 5138 ext4_unlock_group(sb, group); ext4_trim_all_free() 5141 ext4_debug("trimmed %d blocks in the group %d\n", ext4_trim_all_free() 5142 count, group); ext4_trim_all_free() 5156 * start to start+len. For each such a group ext4_trim_all_free function 5162 ext4_group_t group, first_group, last_group; ext4_trim_fs() local 5186 /* Determine first and last group to examine based on start and end */ ext4_trim_fs() 5192 /* end now represents the last cluster to discard in this group */ ext4_trim_fs() 5195 for (group = first_group; group <= last_group; group++) { ext4_trim_fs() 5196 grp = ext4_get_group_info(sb, group); ext4_trim_fs() 5199 ret = ext4_mb_init_group(sb, group); ext4_trim_fs() 5207 * change it for the last group, note that last_cluster is ext4_trim_fs() 5210 if (group == last_group) ext4_trim_fs() 5214 cnt = ext4_trim_all_free(sb, group, first_cluster, ext4_trim_fs() 5224 * For every group except the first one, we are sure ext4_trim_fs() 721 ext4_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap, ext4_group_t group) ext4_mb_generate_buddy() argument 974 ext4_mb_get_buddy_page_lock(struct super_block *sb, ext4_group_t group, struct ext4_buddy *e4b) ext4_mb_get_buddy_page_lock() argument 2014 ext4_mb_good_group(struct ext4_allocation_context *ac, ext4_group_t group, int cr) ext4_mb_good_group() argument 3429 ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, ext4_group_t group) ext4_mb_generate_from_freelist() argument 3453 ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, ext4_group_t group) ext4_mb_generate_from_pa() argument 3816 ext4_mb_discard_group_preallocations(struct super_block *sb, ext4_group_t group, int needed) ext4_mb_discard_group_preallocations() argument
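The buddy cache layout sketched at the top of the file ([group 0 bitmap][group 0 buddy][group 1 bitmap]...) turns lookup into pure arithmetic: group g's bitmap is logical block 2g, its buddy is block 2g+1, and dividing by blocks-per-page yields the page and in-page offset. A sketch with assumed sizes:

#include <stdio.h>

#define PAGE_SIZE_BYTES 4096u
#define BLOCKSIZE       1024u                 /* assumed fs block size */
#define BLOCKS_PER_PAGE (PAGE_SIZE_BYTES / BLOCKSIZE)

/* Page number and in-page byte offset of group @g's bitmap (which == 0)
 * or buddy (which == 1) block, mirroring the block = group * 2 scheme. */
void buddy_cache_pos(unsigned int g, int which,
                     unsigned int *pnum, unsigned int *poff)
{
    unsigned int block = g * 2 + which;

    *pnum = block / BLOCKS_PER_PAGE;
    *poff = (block % BLOCKS_PER_PAGE) * BLOCKSIZE;
}

int main(void)
{
    unsigned int pnum, poff;

    buddy_cache_pos(5, 1, &pnum, &poff);        /* group 5's buddy block */
    printf("page %u, offset %u\n", pnum, poff); /* page 2, offset 3072  */
    return 0;
}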
|
H A D | balloc.c | 32 * Calculate block group number for a given block number 37 ext4_group_t group; ext4_get_group_number() local 40 group = (block - ext4_get_group_number() 44 ext4_get_group_no_and_offset(sb, block, &group, NULL); ext4_get_group_number() 45 return group; ext4_get_group_number() 49 * Calculate the block group number and offset into the block/cluster 96 * block group descriptors, and reserved block group ext4_num_overhead_clusters() 102 * to check to see if the block is in the block group. If it ext4_num_overhead_clusters() 166 * last group, just in case some other tool was used, num_clusters_in_group() 192 * essentially implementing a per-group read-only flag. */ ext4_init_block_bitmap() 236 * Also if the number of blocks within the group is less than ext4_init_block_bitmap() 247 /* Return the number of free blocks in a block group. It is used when 260 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap 263 * The file system contains group descriptors which are located after the 270 * ext4_get_group_desc() -- load group descriptor from disk 272 * @block_group: given block group 274 * group descriptor 327 * blocks may not be in the group at all ext4_valid_block_bitmap() 329 * or it has to also read the block group where the bitmaps ext4_valid_block_bitmap() 404 * @block_group: given block group 653 * Adds up the number of free clusters from each block group. 689 printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n", ext4_count_free_clusters() 730 * ext4_bg_has_super - number of blocks used by the superblock in group 732 * @group: group number to check 735 * in this group. Currently this will be only 0 or 1. 737 int ext4_bg_has_super(struct super_block *sb, ext4_group_t group) ext4_bg_has_super() argument 741 if (group == 0) ext4_bg_has_super() 744 if (group == le32_to_cpu(es->s_backup_bgs[0]) || ext4_bg_has_super() 745 group == le32_to_cpu(es->s_backup_bgs[1])) ext4_bg_has_super() 749 if ((group <= 1) || !EXT4_HAS_RO_COMPAT_FEATURE(sb, ext4_bg_has_super() 752 if (!(group & 1)) ext4_bg_has_super() 754 if (test_root(group, 3) || (test_root(group, 5)) || ext4_bg_has_super() 755 test_root(group, 7)) ext4_bg_has_super() 762 ext4_group_t group) ext4_bg_num_gdb_meta() 764 unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); ext4_bg_num_gdb_meta() 768 if (group == first || group == first + 1 || group == last) ext4_bg_num_gdb_meta() 774 ext4_group_t group) ext4_bg_num_gdb_nometa() 776 if (!ext4_bg_has_super(sb, group)) ext4_bg_num_gdb_nometa() 786 * ext4_bg_num_gdb - number of blocks used by the group table in group 788 * @group: group number to check 790 * Return the number of blocks used by the group descriptor table 791 * (primary or backup) in this group. In the future there may be a 792 * different number of descriptor blocks in each group. 794 unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group) ext4_bg_num_gdb() argument 798 unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb); ext4_bg_num_gdb() 802 return ext4_bg_num_gdb_nometa(sb, group); ext4_bg_num_gdb() 804 return ext4_bg_num_gdb_meta(sb,group); ext4_bg_num_gdb() 810 * the beginning of a block group, including the reserved gdt blocks. 818 /* Check for superblock and gdt backups in this group */ ext4_num_base_meta_clusters() 854 * group for directories and special files. Regular ext4_inode_to_goal_block() 855 * files will start at the second block group. 
This ext4_inode_to_goal_block() 761 ext4_bg_num_gdb_meta(struct super_block *sb, ext4_group_t group) ext4_bg_num_gdb_meta() argument 773 ext4_bg_num_gdb_nometa(struct super_block *sb, ext4_group_t group) ext4_bg_num_gdb_nometa() argument
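The sparse_super placement that ext4_bg_has_super() encodes above is pure arithmetic: groups 0 and 1 always carry a superblock backup, and beyond that only odd groups that are exact powers of 3, 5 or 7 do. A self-contained sketch of the same test:

#include <stdio.h>

/* Is 'group' an exact power of 'base' (base, base^2, base^3, ...)? */
static int test_root(unsigned int group, unsigned int base)
{
    unsigned long long n = base;

    while (n < group)
        n *= base;
    return n == group;
}

/* sparse_super layout: groups 0, 1 and powers of 3, 5 and 7 hold backups. */
static int bg_has_super(unsigned int group)
{
    if (group <= 1)
        return 1;
    if (!(group & 1))           /* powers of 3, 5 and 7 are all odd */
        return 0;
    return test_root(group, 3) || test_root(group, 5) || test_root(group, 7);
}

int main(void)
{
    unsigned int group;

    for (group = 0; group < 100; group++)
        if (bg_has_super(group))
            printf("group %u carries a superblock backup\n", group);
    return 0;   /* prints groups 0 1 3 5 7 9 25 27 49 81 */
}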
|
H A D | bitmap.c | 18 int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, ext4_inode_bitmap_csum_verify() argument 40 void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group, ext4_inode_bitmap_csum_set() argument 56 int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, ext4_block_bitmap_csum_verify() argument 82 void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group, ext4_block_bitmap_csum_set() argument
|
H A D | mballoc.h | 77 * We use locality group prealloc space for stream request. 88 * default group prealloc size 512 blocks 102 /* group which free block extent belongs */ 127 unsigned short pa_type; /* pa type. inode or group */ 145 * Locality group: 146 * we try to group all related changes together
|
/linux-4.1.27/fs/ext2/ |
H A D | ialloc.c | 30 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap 33 * The file system contains group descriptors which are located after the 65 static void ext2_release_inode(struct super_block *sb, int group, int dir) ext2_release_inode() argument 70 desc = ext2_get_group_desc(sb, group, &bh); ext2_release_inode() 73 "can't get descriptor for group %d", group); ext2_release_inode() 77 spin_lock(sb_bgl_lock(EXT2_SB(sb), group)); ext2_release_inode() 81 spin_unlock(sb_bgl_lock(EXT2_SB(sb), group)); ext2_release_inode() 185 * Figure out the offset within the block group inode table ext2_preread_inode() 196 * a directory, then a forward search is made for a block group with both 198 * the groups with above-average free space, that group with the fewest 202 * group to find a free inode. 209 int group, best_group = -1; find_group_dir() local 211 for (group = 0; group < ngroups; group++) { find_group_dir() 212 desc = ext2_get_group_desc (sb, group, NULL); find_group_dir() 220 best_group = group; find_group_dir() 237 * Otherwise we simply return a random group. 241 * It's OK to put directory into a group unless 246 * Parent's group is preferred, if it doesn't satisfy these 248 * of the groups look good we just look for a group with more 249 * free inodes than average (starting at parent's group). 272 int group = -1, i; find_group_orlov() local 287 group = prandom_u32(); find_group_orlov() 288 parent_group = (unsigned)group % ngroups; find_group_orlov() 290 group = (parent_group + i) % ngroups; find_group_orlov() 291 desc = ext2_get_group_desc (sb, group, NULL); find_group_orlov() 300 best_group = group; find_group_orlov() 306 group = best_group; find_group_orlov() 330 group = (parent_group + i) % ngroups; find_group_orlov() 331 desc = ext2_get_group_desc (sb, group, NULL); find_group_orlov() 334 if (sbi->s_debts[group] >= max_debt) find_group_orlov() 347 group = (parent_group + i) % ngroups; find_group_orlov() 348 desc = ext2_get_group_desc (sb, group, NULL); find_group_orlov() 367 return group; find_group_orlov() 375 int group, i; find_group_other() local 380 group = parent_group; find_group_other() 381 desc = ext2_get_group_desc (sb, group, NULL); find_group_other() 395 group = (group + parent->i_ino) % ngroups; find_group_other() 398 * Use a quadratic hash to find a group with a free inode and some find_group_other() 402 group += i; find_group_other() 403 if (group >= ngroups) find_group_other() 404 group -= ngroups; find_group_other() 405 desc = ext2_get_group_desc (sb, group, NULL); find_group_other() 412 * That failed: try linear search for a free inode, even if that group find_group_other() 415 group = parent_group; find_group_other() 417 if (++group >= ngroups) find_group_other() 418 group = 0; find_group_other() 419 desc = ext2_get_group_desc (sb, group, NULL); find_group_other() 427 return group; find_group_other() 436 int group, i; ext2_new_inode() local 455 group = find_group_dir(sb, dir); ext2_new_inode() 457 group = find_group_orlov(sb, dir); ext2_new_inode() 459 group = find_group_other(sb, dir); ext2_new_inode() 461 if (group == -1) { ext2_new_inode() 467 gdp = ext2_get_group_desc(sb, group, &bh2); ext2_new_inode() 469 bitmap_bh = read_inode_bitmap(sb, group); ext2_new_inode() 482 * free inodes in this group, but by the time we tried ext2_new_inode() 486 * next block group. 
ext2_new_inode() 488 if (++group == sbi->s_groups_count) ext2_new_inode() 489 group = 0; ext2_new_inode() 492 if (ext2_set_bit_atomic(sb_bgl_lock(sbi, group), ext2_new_inode() 496 /* this group is exhausted, try next group */ ext2_new_inode() 497 if (++group == sbi->s_groups_count) ext2_new_inode() 498 group = 0; ext2_new_inode() 501 /* try to find free inode in the same group */ ext2_new_inode() 518 ino += group * EXT2_INODES_PER_GROUP(sb) + 1; ext2_new_inode() 522 "block_group = %d,inode=%lu", group, ext2_new_inode() 532 spin_lock(sb_bgl_lock(sbi, group)); ext2_new_inode() 535 if (sbi->s_debts[group] < 255) ext2_new_inode() 536 sbi->s_debts[group]++; ext2_new_inode() 539 if (sbi->s_debts[group]) ext2_new_inode() 540 sbi->s_debts[group]--; ext2_new_inode() 542 spin_unlock(sb_bgl_lock(sbi, group)); ext2_new_inode() 565 ei->i_block_group = group; ext2_new_inode() 640 printk("group %d: stored = %d, counted = %u\n", ext2_count_free_inodes()
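find_group_other() above falls through three policies: prefer the parent's group, then probe outward with a quadratic hash (stride 1, 2, 4, ...) from a start point salted with the parent's inode number, then settle for a linear scan that accepts any free inode. A user-space sketch under those rules, with a toy descriptor array standing in for ext2_get_group_desc():

#include <stdio.h>

/* Toy per-group counters standing in for the on-disk group descriptors. */
struct group_desc {
    int free_inodes;
    int free_blocks;
};

static int find_group_other(const struct group_desc *g, int ngroups,
                            int parent_group, unsigned long parent_ino)
{
    int group = parent_group, i;

    /* 1. Prefer the parent's group if it has a free inode and blocks. */
    if (g[group].free_inodes && g[group].free_blocks)
        return group;

    /* 2. Quadratic hash: stride doubles each step, salted start point. */
    group = (group + parent_ino) % ngroups;
    for (i = 1; i < ngroups; i <<= 1) {
        group = (group + i) % ngroups;
        if (g[group].free_inodes && g[group].free_blocks)
            return group;
    }

    /* 3. Linear fallback: any free inode will do, even with no blocks. */
    group = parent_group;
    for (i = 0; i < ngroups; i++) {
        group = (group + 1) % ngroups;
        if (g[group].free_inodes)
            return group;
    }
    return -1;
}

int main(void)
{
    struct group_desc g[8] = {
        {0, 0}, {0, 5}, {0, 0}, {4, 9}, {0, 0}, {0, 0}, {2, 0}, {0, 0},
    };

    printf("chose group %d\n", find_group_other(g, 8, 0, 42)); /* -> 3 */
    return 0;
}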
|
H A D | balloc.c | 27 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap 30 * The file system contains group descriptors which are located after the 250 * @grp_goal: given goal block relative to the allocation block group 251 * @group: the current allocation block group 254 * Test if the given goal block (group relative) is within the file's 257 * If the reservation window is outside the goal allocation group, return 0; 265 unsigned int group, struct super_block * sb) goal_in_my_reservation() 269 group_first_block = ext2_group_first_block_no(sb, group); goal_in_my_reservation() 509 * Check to see if we are freeing blocks across a group ext2_free_blocks() 571 * @start: the starting block (group relative) of the search 572 * @bh: bufferhead contains the block group bitmap 573 * @maxblocks: the ending block (group relative) of the reservation 592 * @start: the starting block (group relative) to find next 594 * @bh: bufferhead contains the block group bitmap 595 * @maxblocks: the ending block (group relative) for the search 644 * @group: given allocation block group 646 * @grp_goal: given target block within the group 658 * ends at the block group's last block. 664 ext2_try_to_allocate(struct super_block *sb, int group, ext2_try_to_allocate() argument 675 group_first_block = ext2_group_first_block_no(sb, group); ext2_try_to_allocate() 679 /* reservation window cross group boundary */ ext2_try_to_allocate() 683 /* reservation window crosses group boundary */ ext2_try_to_allocate() 716 if (ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group), grp_goal, ext2_try_to_allocate() 731 && !ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group), ext2_try_to_allocate() 765 * group. The search will end when we found the start of next 825 * have a reservation across the group boundary here find_next_reservable_window() 865 * reservation list (the list that inside the group). We try to 867 * or the beginning of the group, if there is no goal. 871 * it. If there is no free block until the end of group, then the 872 * whole group is full, we failed. Otherwise, check if the free 883 * failed: we failed to find a reservation window in this group 887 * @grp_goal: The goal (group-relative). It is where the search for a 891 * of the group. 894 * @group: the group we are trying to allocate in 895 * @bitmap_bh: the block group block bitmap 900 unsigned int group, struct buffer_head *bitmap_bh) alloc_new_reservation() 910 group_first_block = ext2_group_first_block_no(sb, group); alloc_new_reservation() 922 * if the old reservation is cross group boundary alloc_new_reservation() 926 * that belongs to the next group. In this case, there is no alloc_new_reservation() 928 * in this group(which will fail). we should alloc_new_reservation() 932 * window to the first block of next group. 
alloc_new_reservation() 1068 * @group: given allocation block group 1070 * @grp_goal: given target block within the group 1092 ext2_try_to_allocate_with_rsv(struct super_block *sb, unsigned int group, ext2_try_to_allocate_with_rsv() argument 1108 return ext2_try_to_allocate(sb, group, bitmap_bh, ext2_try_to_allocate_with_rsv() 1112 * grp_goal is a group relative block number (if there is a goal) ext2_try_to_allocate_with_rsv() 1115 * first block is the block number of the first block in this group ext2_try_to_allocate_with_rsv() 1117 group_first_block = ext2_group_first_block_no(sb, group); ext2_try_to_allocate_with_rsv() 1138 grp_goal, group, sb)) { ext2_try_to_allocate_with_rsv() 1142 group, bitmap_bh); ext2_try_to_allocate_with_rsv() 1147 grp_goal, group, sb)) ext2_try_to_allocate_with_rsv() 1163 ret = ext2_try_to_allocate(sb, group, bitmap_bh, grp_goal, ext2_try_to_allocate_with_rsv() 1206 * each block group the search first looks for an entire free byte in the block 1222 ext2_grpblk_t free_blocks; /* number of free blocks in a group */ ext2_new_blocks() 1311 * group_no and gdp correctly point to the last group visited. ext2_new_blocks() 1323 * skip this group (and avoid loading bitmap) if there ext2_new_blocks() 1329 * skip this group if the number of ext2_new_blocks() 1341 * try to allocate block(s) from this group, without a goal(-1). ext2_new_blocks() 1367 ext2_debug("using block group %d(%d)\n", ext2_new_blocks() 1470 printk ("group %d: stored = %d, counted = %lu\n", ext2_count_free_blocks() 1499 static int ext2_group_sparse(int group) ext2_group_sparse() argument 1501 if (group <= 1) ext2_group_sparse() 1503 return (test_root(group, 3) || test_root(group, 5) || ext2_group_sparse() 1504 test_root(group, 7)); ext2_group_sparse() 1508 * ext2_bg_has_super - number of blocks used by the superblock in group 1510 * @group: group number to check 1513 * in this group. Currently this will be only 0 or 1. 1515 int ext2_bg_has_super(struct super_block *sb, int group) ext2_bg_has_super() argument 1518 !ext2_group_sparse(group)) ext2_bg_has_super() 1524 * ext2_bg_num_gdb - number of blocks used by the group table in group 1526 * @group: group number to check 1528 * Return the number of blocks used by the group descriptor table 1529 * (primary or backup) in this group. In the future there may be a 1530 * different number of descriptor blocks in each group. 1532 unsigned long ext2_bg_num_gdb(struct super_block *sb, int group) ext2_bg_num_gdb() argument 1534 return ext2_bg_has_super(sb, group) ? EXT2_SB(sb)->s_gdb_count : 0; ext2_bg_num_gdb() 264 goal_in_my_reservation(struct ext2_reserve_window *rsv, ext2_grpblk_t grp_goal, unsigned int group, struct super_block * sb) goal_in_my_reservation() argument 898 alloc_new_reservation(struct ext2_reserve_window_node *my_rsv, ext2_grpblk_t grp_goal, struct super_block *sb, unsigned int group, struct buffer_head *bitmap_bh) alloc_new_reservation() argument
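goal_in_my_reservation() above answers one question: does the reservation window overlap this group at all, and if a goal block was given, does the window cover it? A sketch in absolute block numbers; the linear group layout (first block = group * blocks_per_group) is a simplification of ext2_group_first_block_no():

#include <stdio.h>

/* A reservation window in absolute (filesystem-wide) block numbers. */
struct reserve_window {
    long start;
    long end;       /* inclusive */
};

static int goal_in_my_reservation(const struct reserve_window *rsv,
                                  long grp_goal, unsigned int group,
                                  long blocks_per_group)
{
    long first = (long)group * blocks_per_group;    /* group's first block */
    long last = first + blocks_per_group - 1;

    if (rsv->start > last || rsv->end < first)
        return 0;                   /* window misses the group entirely */
    if (grp_goal >= 0 &&
        (first + grp_goal < rsv->start || first + grp_goal > rsv->end))
        return 0;                   /* goal given, but outside the window */
    return 1;
}

int main(void)
{
    struct reserve_window rsv = { .start = 1000, .end = 1015 };

    printf("%d\n", goal_in_my_reservation(&rsv, 10, 0, 8192));   /* 0 */
    printf("%d\n", goal_in_my_reservation(&rsv, 1005, 0, 8192)); /* 1 */
    printf("%d\n", goal_in_my_reservation(&rsv, -1, 0, 8192));   /* 1: no goal */
    return 0;
}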
|
/linux-4.1.27/arch/mips/rb532/ |
H A D | irq.c | 87 static inline int group_to_ip(unsigned int group) group_to_ip() argument 89 return group + 2; group_to_ip() 115 unsigned int group, intr_bit, irq_nr = d->irq; rb532_enable_irq() local 122 group = ip >> 5; rb532_enable_irq() 127 enable_local_irq(group_to_ip(group)); rb532_enable_irq() 129 addr = intr_group[group].base_addr; rb532_enable_irq() 136 unsigned int group, intr_bit, mask, irq_nr = d->irq; rb532_disable_irq() local 143 group = ip >> 5; rb532_disable_irq() 147 addr = intr_group[group].base_addr; rb532_disable_irq() 153 if (group == GPIO_MAPPED_IRQ_GROUP && irq_nr <= (GROUP4_IRQ_BASE + 13)) rb532_disable_irq() 158 * group, disable corresponding IP rb532_disable_irq() 160 if (mask == intr_group[group].mask) rb532_disable_irq() 161 disable_local_irq(group_to_ip(group)); rb532_disable_irq() 174 int group = irq_to_group(d->irq); rb532_set_type() local 176 if (group != GPIO_MAPPED_IRQ_GROUP || d->irq > (GROUP4_IRQ_BASE + 13)) rb532_set_type() 216 unsigned int ip, pend, group; plat_irq_dispatch() local 225 group = 21 + (fls(ip) - 32); plat_irq_dispatch() 227 addr = intr_group[group].base_addr; plat_irq_dispatch() 232 do_IRQ((group << 5) + pend); plat_irq_dispatch()
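The shifts above encode an rb532 interrupt number as a group/bit pair: the five low bits select a bit within a group's 32-interrupt register, the remaining bits select the group, and group_to_ip() offsets the group onto the MIPS IP2..IP6 cause lines. A sketch of that encoding; the GROUP0_IRQ_BASE value of 8 is an assumption for the sketch:

#include <stdio.h>

#define GROUP0_IRQ_BASE 8   /* platform's first group IRQ number (assumed) */

static inline int group_to_ip(unsigned int group)
{
    return group + 2;       /* group 0 -> IP2, group 1 -> IP3, ... */
}

static inline unsigned int irq_to_group(unsigned int irq)
{
    return (irq - GROUP0_IRQ_BASE) >> 5;    /* 32 IRQs per group */
}

static inline unsigned int irq_to_bit(unsigned int irq)
{
    return (irq - GROUP0_IRQ_BASE) & 0x1f;  /* bit within the group */
}

int main(void)
{
    unsigned int irq;

    for (irq = GROUP0_IRQ_BASE; irq < GROUP0_IRQ_BASE + 160; irq += 45)
        printf("irq %3u -> group %u, bit %2u, CPU line IP%d\n",
               irq, irq_to_group(irq), irq_to_bit(irq),
               group_to_ip(irq_to_group(irq)));
    return 0;
}

Dispatch goes the other way: the pending-bit index found in a group's register is packed back with (group << 5) + pend, exactly as in plat_irq_dispatch() above.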
|
/linux-4.1.27/include/linux/platform_data/ |
H A D | gpio-lpc32xx.h | 24 * GPI pins : 28xP3 group 25 * GPO pins : 24xP3 group 26 * GPIO pins: 8xP0 group, 24xP1 group, 13xP2 group, 6xP3 group 46 * See the LPC32x0 User's guide for GPIO group numbers
|
H A D | keypad-omap.h | 31 * keys pressed in the same group are considered as pressed. This is 34 * must be available for use as group bits. The below GROUP_SHIFT
|
/linux-4.1.27/fs/configfs/ |
H A D | item.c | 125 void config_group_init_type_name(struct config_group *group, const char *name, config_group_init_type_name() argument 128 config_item_set_name(&group->cg_item, name); config_group_init_type_name() 129 group->cg_item.ci_type = type; config_group_init_type_name() 130 config_group_init(group); config_group_init_type_name() 179 * config_group_init - initialize a group for use 180 * @group: config_group 182 void config_group_init(struct config_group *group) config_group_init() argument 184 config_item_init(&group->cg_item); config_group_init() 185 INIT_LIST_HEAD(&group->cg_children); config_group_init() 190 * config_group_find_item - search for item in group. 191 * @group: group we're looking in. 194 * Iterate over @group->cg_list, looking for a matching config_item. 196 * Caller must have locked group via @group->cg_subsys->su_mtx. 198 struct config_item *config_group_find_item(struct config_group *group, config_group_find_item() argument 204 list_for_each(entry, &group->cg_children) { config_group_find_item()
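config_group_find_item() above is a plain name search over the group's child list; the real function also takes a reference on the match and must run under the subsystem mutex. A stripped-down sketch of the walk:

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for a config_group's list of child items. */
struct item {
    const char *name;
    struct item *next;
};

static struct item *find_item(struct item *children, const char *name)
{
    struct item *i;

    for (i = children; i; i = i->next)
        if (!strcmp(i->name, name))     /* first name match wins */
            return i;
    return NULL;
}

int main(void)
{
    struct item c = { "c", NULL }, b = { "b", &c }, a = { "a", &b };
    struct item *hit = find_item(&a, "b");

    printf("found: %s\n", hit ? hit->name : "(none)");
    return 0;
}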
|
H A D | dir.c | 89 * from the youngest non-default group ancestor. 91 * For a non-default group A having default groups A/B, A/C, and A/C/D, default 93 * default_group_class[0], and default group A/C/D will be in 127 * sd->s_depth == -1 iff we are a non default group. configfs_adjust_dir_dirent_depth_before_populate() 128 * else (we are a default group) sd->s_depth > 0 (see configfs_adjust_dir_dirent_depth_before_populate() 133 * We are a non default group and we are going to create configfs_adjust_dir_dirent_depth_before_populate() 447 * Fake invisibility if dir belongs to a group/default groups hierarchy configfs_lookup() 500 /* Mark that we're trying to drop the group */ configfs_detach_prep() 609 static void detach_groups(struct config_group *group) detach_groups() argument 611 struct dentry * dentry = dget(group->cg_item.ci_dentry); detach_groups() 654 struct config_group *group) create_default_group() 661 if (!group->cg_item.ci_name) create_default_group() 662 group->cg_item.ci_name = group->cg_item.ci_namebuf; create_default_group() 665 child = d_alloc_name(parent, group->cg_item.ci_name); create_default_group() 670 &group->cg_item, child); create_default_group() 684 static int populate_groups(struct config_group *group) populate_groups() argument 690 if (group->default_groups) { populate_groups() 691 for (i = 0; group->default_groups[i]; i++) { populate_groups() 692 new_group = group->default_groups[i]; populate_groups() 694 ret = create_default_group(group, new_group); populate_groups() 696 detach_groups(group); populate_groups() 712 struct config_group *group; unlink_obj() local 714 group = item->ci_group; unlink_obj() 715 if (group) { unlink_obj() 725 config_group_put(group); unlink_obj() 732 * Parent seems redundant with group, but it makes certain link_obj() 751 static void unlink_group(struct config_group *group) unlink_group() argument 756 if (group->default_groups) { unlink_group() 757 for (i = 0; group->default_groups[i]; i++) { unlink_group() 758 new_group = group->default_groups[i]; unlink_group() 763 group->cg_subsys = NULL; unlink_group() 764 unlink_obj(&group->cg_item); unlink_group() 767 static void link_group(struct config_group *parent_group, struct config_group *group) link_group() argument 773 link_obj(&parent_group->cg_item, &group->cg_item); link_group() 778 subsys = to_configfs_subsystem(group); link_group() 781 group->cg_subsys = subsys; link_group() 783 if (group->default_groups) { link_group() 784 for (i = 0; group->default_groups[i]; i++) { link_group() 785 new_group = group->default_groups[i]; link_group() 786 link_group(group, new_group); link_group() 854 * We must lock the group's inode to avoid races with the VFS configfs_attach_group() 878 /* Caller holds the mutex of the group's inode */ configfs_detach_group() 1159 struct config_group *group = NULL; configfs_mkdir() local 1171 * Fake invisibility if dir belongs to a group/default groups hierarchy configfs_mkdir() 1222 group = type->ct_group_ops->make_group(to_config_group(parent_item), name); configfs_mkdir() 1223 if (!group) configfs_mkdir() 1224 group = ERR_PTR(-ENOMEM); configfs_mkdir() 1225 if (!IS_ERR(group)) { configfs_mkdir() 1226 link_group(to_config_group(parent_item), group); configfs_mkdir() local 1227 item = &group->cg_item; configfs_mkdir() 1229 ret = PTR_ERR(group); configfs_mkdir() 1285 if (group) configfs_mkdir() 1302 if (group) configfs_mkdir() 1303 unlink_group(group); configfs_mkdir() 1487 * Fake invisibility if dir belongs to a group/default groups hierarchy configfs_dir_open() 
1642 struct config_group *group = &subsys->su_group; configfs_register_subsystem() local 1651 if (!group->cg_item.ci_name) configfs_register_subsystem() 1652 group->cg_item.ci_name = group->cg_item.ci_namebuf; configfs_register_subsystem() 1655 link_group(to_config_group(sd->s_element), group); configfs_register_subsystem() 1660 dentry = d_alloc_name(root, group->cg_item.ci_name); configfs_register_subsystem() 1664 err = configfs_attach_group(sd->s_element, &group->cg_item, configfs_register_subsystem() 1680 unlink_group(group); configfs_register_subsystem() 1689 struct config_group *group = &subsys->su_group; configfs_unregister_subsystem() local 1690 struct dentry *dentry = group->cg_item.ci_dentry; configfs_unregister_subsystem() 1708 configfs_detach_group(&group->cg_item); configfs_unregister_subsystem() 1719 unlink_group(group); configfs_unregister_subsystem() 653 create_default_group(struct config_group *parent_group, struct config_group *group) create_default_group() argument
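link_group() above wires a group under its parent and then recurses over the NULL-terminated default_groups array; that is how the A, A/B, A/C, A/C/D hierarchy from dir.c's own comment gets built in one call. A user-space sketch of the recursion (the real code also propagates the owning configfs_subsystem pointer at each level):

#include <stdio.h>

/* A group with an optional NULL-terminated array of default child groups. */
struct group {
    const char *name;
    struct group **default_groups;
    struct group *parent;       /* filled in by linking */
};

static void link_group(struct group *parent, struct group *g)
{
    int i;

    g->parent = parent;
    printf("%s -> parent %s\n", g->name, parent ? parent->name : "(root)");

    /* Depth-first over the NULL-terminated default_groups array. */
    if (g->default_groups)
        for (i = 0; g->default_groups[i]; i++)
            link_group(g, g->default_groups[i]);
}

int main(void)
{
    struct group d = { "A/C/D", NULL, NULL };
    struct group *c_kids[] = { &d, NULL };
    struct group c = { "A/C", c_kids, NULL };
    struct group b = { "A/B", NULL, NULL };
    struct group *a_kids[] = { &b, &c, NULL };
    struct group a = { "A", a_kids, NULL };

    link_group(NULL, &a);
    return 0;
}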
|
/linux-4.1.27/include/uapi/linux/netfilter/ |
H A D | xt_NFLOG.h | 13 __u16 group; member in struct:xt_nflog_info
|
/linux-4.1.27/include/uapi/linux/netfilter_bridge/ |
H A D | ebt_nflog.h | 16 __u16 group; member in struct:ebt_nflog_info
|
/linux-4.1.27/include/linux/ |
H A D | fsnotify_backend.h | 84 * Each group must define these ops. The fsnotify infrastructure will call 85 * these operations for each relevant group. 87 * should_send_event - given a group, inode, and mask this function determines 88 * if the group is interested in this event. 89 * handle_event - main call for a group to handle an fs event 90 * free_group_priv - called when a group refcnt hits 0 to clean up the private union 91 * freeing_mark - called when a mark is being destroyed for some reason. The group 97 int (*handle_event)(struct fsnotify_group *group, 103 void (*free_group_priv)(struct fsnotify_group *group); 104 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); 110 * a group. If you want to carry more info from the accessing task to the 121 * A group is a "thing" that wants to receive notification about filesystem 122 * events. The mask holds the subset of event types this group cares about. 123 * refcnt on a group is up to the implementor and at any moment if it goes 0 128 * How the refcnt is used is up to each group. When the refcnt hits 0 129 * fsnotify will clean up all of the resources associated with this group. 130 * As an example, the dnotify group will always have a refcnt=1 and that 131 * will never change. Inotify, on the other hand, has a group per 135 atomic_t refcnt; /* things with interest in this group */ 137 const struct fsnotify_ops *ops; /* how this group handles things */ 141 struct list_head notification_list; /* list of event_holder this group needs to send to userspace */ 146 * Valid fsnotify group priorities. Events are sent in order from highest 154 /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ 158 * a group */ 159 struct list_head marks_list; /* all inode marks for this group */ 180 /* allows a group to block waiting for a userspace response */ 214 struct fsnotify_group *group; /* group this mark is for */ member in struct:fsnotify_mark 215 struct list_head g_list; /* list of marks by group->i_fsnotify_marks 220 spinlock_t lock; /* protect group and inode */ 298 /* create a new group */ 300 /* get reference to a group */ 301 extern void fsnotify_get_group(struct fsnotify_group *group); 302 /* drop reference on a group from fsnotify_alloc_group */ 303 extern void fsnotify_put_group(struct fsnotify_group *group); 304 /* destroy group */ 305 extern void fsnotify_destroy_group(struct fsnotify_group *group); 309 extern void fsnotify_destroy_event(struct fsnotify_group *group, 311 /* attach the event to the group notification queue */ 312 extern int fsnotify_add_event(struct fsnotify_group *group, 317 extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event); 318 /* true if the group notification queue is empty */ 319 extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); 321 extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group); 323 extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group); 332 /* find (and take a reference to) a mark associated with group and inode */ 333 extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode); 334 /* find (and take a reference to) a mark associated with group and vfsmount */ 335 extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt); 342 /* attach the mark to both the group and the inode */ 343 extern int fsnotify_add_mark(struct

fsnotify_mark *mark, struct fsnotify_group *group, 345 extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group, 347 /* given a group and a mark, flag mark to be freed when all references are dropped */ 349 struct fsnotify_group *group); 351 struct fsnotify_group *group); 352 /* run all the marks in a group, and clear all of the vfsmount marks */ 353 extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group); 354 /* run all the marks in a group, and clear all of the inode marks */ 355 extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group); 356 /* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/ 357 extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags); 358 /* run all the marks in a group, and flag them to be freed */ 359 extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
|
H A D | configfs.h | 90 * group - a group of config_items of a specific type, belonging 100 extern void config_group_init(struct config_group *group); 101 extern void config_group_init_type_name(struct config_group *group, 110 static inline struct config_group *config_group_get(struct config_group *group) config_group_get() argument 112 return group ? to_config_group(config_item_get(&group->cg_item)) : NULL; config_group_get() 115 static inline void config_group_put(struct config_group *group) config_group_put() argument 117 config_item_put(&group->cg_item); config_group_put() 212 * items. If the item is a group, it may support mkdir(2). 214 * group supports make_group(), one can create group children. If it 217 * default_groups on group->default_groups, it has automatically created 218 * group children. default_groups may coexist alongside make_group() or 219 * make_item(), but if the group wishes to have only default_groups 221 * If the group has commit(), it supports pending and committed (active) 233 struct config_item *(*make_item)(struct config_group *group, const char *name); 234 struct config_group *(*make_group)(struct config_group *group, const char *name); 236 void (*disconnect_notify)(struct config_group *group, struct config_item *item); 237 void (*drop_item)(struct config_group *group, struct config_item *item); 245 static inline struct configfs_subsystem *to_configfs_subsystem(struct config_group *group) to_configfs_subsystem() argument 247 return group ? to_configfs_subsystem() 248 container_of(group, struct configfs_subsystem, su_group) : to_configfs_subsystem()
|
H A D | iommu.h | 208 struct iommu_group *group); 210 struct iommu_group *group); 212 extern void *iommu_group_get_iommudata(struct iommu_group *group); 213 extern void iommu_group_set_iommudata(struct iommu_group *group, 216 extern int iommu_group_set_name(struct iommu_group *group, const char *name); 217 extern int iommu_group_add_device(struct iommu_group *group, 220 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data, 223 extern void iommu_group_put(struct iommu_group *group); 224 extern int iommu_group_register_notifier(struct iommu_group *group, 226 extern int iommu_group_unregister_notifier(struct iommu_group *group, 228 extern int iommu_group_id(struct iommu_group *group); 377 struct iommu_group *group) iommu_attach_group() 383 struct iommu_group *group) iommu_detach_group() 392 static inline void *iommu_group_get_iommudata(struct iommu_group *group) iommu_group_get_iommudata() argument 397 static inline void iommu_group_set_iommudata(struct iommu_group *group, iommu_group_set_iommudata() argument 403 static inline int iommu_group_set_name(struct iommu_group *group, iommu_group_set_name() argument 409 static inline int iommu_group_add_device(struct iommu_group *group, iommu_group_add_device() argument 419 static inline int iommu_group_for_each_dev(struct iommu_group *group, iommu_group_for_each_dev() argument 431 static inline void iommu_group_put(struct iommu_group *group) iommu_group_put() argument 435 static inline int iommu_group_register_notifier(struct iommu_group *group, iommu_group_register_notifier() argument 441 static inline int iommu_group_unregister_notifier(struct iommu_group *group, iommu_group_unregister_notifier() argument 447 static inline int iommu_group_id(struct iommu_group *group) iommu_group_id() argument 376 iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) iommu_attach_group() argument 382 iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) iommu_detach_group() argument
|
H A D | vfio.h | 72 struct iommu_group *group); 74 struct iommu_group *group); 87 extern void vfio_group_put_external_user(struct vfio_group *group); 88 extern int vfio_external_user_iommu_id(struct vfio_group *group); 89 extern long vfio_external_check_extension(struct vfio_group *group, 96 extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, 108 static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, vfio_spapr_iommu_eeh_ioctl() argument
|
H A D | nfs_page.h | 30 PG_HEADLOCK, /* page group lock of wb_head */ 31 PG_TEARDOWN, /* page group sync for destroy */ 32 PG_UNLOCKPAGE, /* page group sync bit in read path */ 33 PG_UPTODATE, /* page group sync bit in read path */ 34 PG_WB_END, /* page group sync bit in write path */ 35 PG_REMOVE, /* page group sync bit in write path */
|
H A D | connector.h | 58 u32 seq, group; member in struct:cn_callback_entry 74 int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); 75 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
|
H A D | genl_magic_func.h | 12 #define GENL_mc_group(group) 274 * Magic: define multicast group registration helper 279 #define GENL_mc_group(group) { .name = #group, }, 285 #define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group), CONCAT_() 290 #define GENL_mc_group(group) \ 291 static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \ 295 CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \ 303 #define GENL_mc_group(group)
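The GENL_mc_group() dance above is the classic X-macro pattern: the same list is expanded several times, with the macro redefined before each pass, so one declaration of the multicast groups yields table entries, enum values and helper functions in turn. A self-contained sketch; GROUP_LIST and the group names are hypothetical stand-ins for the family header's list:

#include <stdio.h>

/* One list of groups, expanded under different macro definitions below. */
#define GROUP_LIST \
    GENL_mc_group(events) \
    GENL_mc_group(config) \
    GENL_mc_group(stats)

/* Pass 1: an enum of group indices. */
#define GENL_mc_group(group) GRP_ ## group,
enum { GROUP_LIST GRP_COUNT };
#undef GENL_mc_group

/* Pass 2: a name table, as the real header builds its group array. */
#define GENL_mc_group(group) #group,
static const char *group_names[] = { GROUP_LIST };
#undef GENL_mc_group

int main(void)
{
    int i;

    for (i = 0; i < GRP_COUNT; i++)
        printf("group %d: %s\n", i, group_names[i]);
    return 0;
}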
|
H A D | netlink.h | 49 int (*bind)(struct net *net, int group); 50 void (*unbind)(struct net *net, int group); 66 extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); 68 extern int netlink_has_listeners(struct sock *sk, unsigned int group); 73 __u32 group, gfp_t allocation); 75 __u32 portid, __u32 group, gfp_t allocation, 78 extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
|
H A D | rtnetlink.h | 10 extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); 13 u32 group, struct nlmsghdr *nlh, gfp_t flags); 14 extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
|
H A D | mcb.h | 49 * @group: group in Chameleon table 65 int group; member in struct:mcb_device
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | pte-hash64-4k.h | 5 #define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */ 6 #define _PAGE_GROUP_IX 0x7000 /* software: HPTE index within group */
|
H A D | ipic.h | 34 #define IPIC_SIPRR_A 0x10 /* System Internal Interrupt group A Priority Register */ 35 #define IPIC_SIPRR_B 0x14 /* System Internal Interrupt group B Priority Register */ 36 #define IPIC_SIPRR_C 0x18 /* System Internal Interrupt group C Priority Register */ 37 #define IPIC_SIPRR_D 0x1C /* System Internal Interrupt group D Priority Register */ 42 #define IPIC_SMPRR_A 0x30 /* System Mixed Interrupt group A Priority Register */ 43 #define IPIC_SMPRR_B 0x34 /* System Mixed Interrupt group B Priority Register */
|
H A D | qe_ic.h | 52 QE_IC_GRP_W = 0, /* QE interrupt controller group W */ 53 QE_IC_GRP_X, /* QE interrupt controller group X */ 54 QE_IC_GRP_Y, /* QE interrupt controller group Y */ 55 QE_IC_GRP_Z, /* QE interrupt controller group Z */ 56 QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */ 57 QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
|
/linux-4.1.27/drivers/pinctrl/sirf/ |
H A D | pinctrl-atlas6.c | 4 * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group 133 .group = 1, 136 .group = 2, 157 .group = 2, 163 .group = 1, 166 .group = 0, 184 .group = 2, 190 .group = 1, 193 .group = 0, 213 .group = 2, 219 .group = 1, 222 .group = 0, 240 .group = 0, 243 .group = 1, 246 .group = 2, 263 .group = 2, 277 .group = 1, 291 .group = 0, 294 .group = 1, 311 .group = 1, 325 .group = 0, 328 .group = 1, 345 .group = 0, 348 .group = 1, 365 .group = 1, 382 .group = 1, 399 .group = 1, 416 .group = 3, 431 .group = 3, 446 .group = 3, 463 .group = 3, 477 .group = 1, 494 .group = 2, 511 .group = 0, 528 .group = 0, 545 .group = 1, 562 .group = 1, 579 .group = 1, 596 .group = 1, 609 .group = 0, 612 .group = 1, 629 .group = 1, 646 .group = 2, 649 .group = 3, 666 .group = 3, 683 .group = 0, 700 .group = 0, 717 .group = 2, 731 .group = 1, 751 .group = 0, 755 .group = 2, 773 .group = 2, 787 .group = 0, 804 .group = 0, 821 .group = 0, 835 .group = 0, 849 .group = 0, 863 .group = 2, 877 .group = 0, 894 .group = 1, 913 .group = 0, 948 .group = 0,
|
H A D | pinctrl-prima2.c | 4 * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group 137 .group = 3, 143 .group = 2, 161 .group = 3, 167 .group = 2, 170 .group = 0, 188 .group = 3, 194 .group = 2, 197 .group = 0, 217 .group = 3, 223 .group = 2, 226 .group = 0, 244 .group = 2, 247 .group = 1, 264 .group = 2, 278 .group = 1, 292 .group = 1, 309 .group = 1, 323 .group = 0, 326 .group = 1, 343 .group = 1, 360 .group = 1, 374 .group = 1, 391 .group = 1, 408 .group = 1, 425 .group = 1, 440 .group = 1, 455 .group = 1, 473 .group = 1, 490 .group = 1, 507 .group = 0, 521 .group = 0, 538 .group = 0, 555 .group = 1, 572 .group = 1, 589 .group = 1, 606 .group = 1, 620 .group = 1, 637 .group = 1, 651 .group = 1, 654 .group = 2, 671 .group = 1, 685 .group = 2, 711 .group = 2, 728 .group = 2, 742 .group = 2, 762 .group = 2, 776 .group = 0, 790 .group = 2, 795 .group = 0, 813 .group = 0, 830 .group = 0, 844 .group = 0, 858 .group = 0, 872 .group = 0, 886 .group = 1, 902 .group = 1, 937 .group = 0,
|
H A D | pinctrl-sirf.h | 4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. 56 unsigned long group; member in struct:sirfsoc_muxmask 70 * struct sirfsoc_pin_group - describes a SiRFprimaII pin group 71 * @name: the name of this specific pin group 72 * @pins: an array of discrete physical pins used in this group, taken 74 * @num_pins: the number of pins in this group array, i.e. the number of
|
/linux-4.1.27/fs/ocfs2/ |
H A D | resize.c | 49 * in the last group. If there are some, mark them or clear 52 * Return how many backups we find in the last group. 98 struct ocfs2_group_desc *group; ocfs2_update_last_group_and_inode() local 113 group = (struct ocfs2_group_desc *)group_bh->b_data; ocfs2_update_last_group_and_inode() 115 /* update the group first. */ ocfs2_update_last_group_and_inode() 117 le16_add_cpu(&group->bg_bits, num_bits); ocfs2_update_last_group_and_inode() 118 le16_add_cpu(&group->bg_free_bits_count, num_bits); ocfs2_update_last_group_and_inode() 122 * this group and update the group bitmap accordingly. ocfs2_update_last_group_and_inode() 127 group, ocfs2_update_last_group_and_inode() 129 le16_add_cpu(&group->bg_free_bits_count, -1 * backups); ocfs2_update_last_group_and_inode() 142 chain = le16_to_cpu(group->bg_chain); ocfs2_update_last_group_and_inode() 165 group, ocfs2_update_last_group_and_inode() 167 le16_add_cpu(&group->bg_free_bits_count, backups); ocfs2_update_last_group_and_inode() 168 le16_add_cpu(&group->bg_bits, -1 * num_bits); ocfs2_update_last_group_and_inode() 169 le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits); ocfs2_update_last_group_and_inode() 262 * existing group. 272 struct ocfs2_group_desc *group = NULL; ocfs2_group_extend() local 328 group = (struct ocfs2_group_desc *)group_bh->b_data; ocfs2_group_extend() 331 if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters > ocfs2_group_extend() 339 (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters); ocfs2_group_extend() 348 /* update the last group descriptor and inode. */ ocfs2_group_extend() 422 u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group); ocfs2_verify_group_and_input() 427 mlog(ML_ERROR, "add a group which is in the current volume.\n"); ocfs2_verify_group_and_input() 432 "the add group should be in chain %u\n", next_free); ocfs2_verify_group_and_input() 434 mlog(ML_ERROR, "add group's clusters overflow.\n"); ocfs2_verify_group_and_input() 436 mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n"); ocfs2_verify_group_and_input() 441 "the last group isn't full. Use group extend first.\n"); ocfs2_verify_group_and_input() 442 else if (input->group != ocfs2_which_cluster_group(inode, cluster)) ocfs2_verify_group_and_input() 443 mlog(ML_ERROR, "group blkno is invalid\n"); ocfs2_verify_group_and_input() 445 mlog(ML_ERROR, "group descriptor check failed.\n"); ocfs2_verify_group_and_input() 452 /* Add a new group descriptor to global_bitmap. */ ocfs2_group_add() 462 struct ocfs2_group_desc *group = NULL; ocfs2_group_add() local 499 ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh); ocfs2_group_add() 501 mlog(ML_ERROR, "Can't read the group descriptor # %llu " ocfs2_group_add() 502 "from the device.", (unsigned long long)input->group); ocfs2_group_add() 514 trace_ocfs2_group_add((unsigned long long)input->group, ocfs2_group_add() 535 group = (struct ocfs2_group_desc *)group_bh->b_data; ocfs2_group_add() 536 bg_ptr = le64_to_cpu(group->bg_next_group); ocfs2_group_add() 537 group->bg_next_group = cr->c_blkno; ocfs2_group_add() 543 group->bg_next_group = cpu_to_le64(bg_ptr); ocfs2_group_add() 553 cr->c_blkno = cpu_to_le64(input->group); ocfs2_group_add()
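The le16_add_cpu() calls above adjust on-disk, little-endian group descriptor fields: decode to CPU byte order, add the delta, re-encode. A portable sketch of that pattern; explicit byte access keeps it correct on big-endian hosts too:

#include <stdio.h>
#include <stdint.h>

static uint16_t le16_to_cpu(const uint8_t b[2])
{
    return (uint16_t)(b[0] | (b[1] << 8));
}

static void cpu_to_le16(uint16_t v, uint8_t b[2])
{
    b[0] = v & 0xff;
    b[1] = v >> 8;
}

/* Adjust a little-endian 16-bit field in place, as le16_add_cpu() does. */
static void le16_add_cpu(uint8_t b[2], int delta)
{
    cpu_to_le16((uint16_t)(le16_to_cpu(b) + delta), b);
}

int main(void)
{
    uint8_t bg_free_bits_count[2];

    cpu_to_le16(100, bg_free_bits_count);
    le16_add_cpu(bg_free_bits_count, 64);   /* grow the group by 64 bits */
    le16_add_cpu(bg_free_bits_count, -2);   /* claim 2 backup blocks */
    printf("free bits now %u\n", le16_to_cpu(bg_free_bits_count)); /* 162 */
    return 0;
}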
|
H A D | journal.h | 389 /* group extend. inode update and last group update. */ 392 /* group add. inode update and the new group update. */ 395 /* get one bit out of a suballocator: dinode + group descriptor + 396 * prev. group desc. if we relink. */ 405 /* dinode + group descriptor update. We don't relink on free yet. */ 431 * alloc group descriptor + mkdir/symlink blocks + dir blocks + xattr 471 * inode alloc group descriptor + orphan dir index root + 488 /* global bitmap dinode, group desc., relinked group, 489 * suballocator dinode, group desc., relinked group, 536 /* bitmap dinode, group desc. + relinked group. */ ocfs2_calc_extend_credits() 574 /* parent inode update + new block group header + bitmap inode update ocfs2_calc_group_alloc_credits() 581 * Allocating a discontiguous block group requires the credits from 583 * the group descriptor's extent list. The caller already has started
|
H A D | suballoc.h | 158 u64 group = block - (u64) bit; ocfs2_which_suballoc_group() local 160 return group; ocfs2_which_suballoc_group() 166 /* This should work for all block group descriptors as only ocfs2_cluster_from_desc() 167 * the 1st group descriptor of the cluster bitmap is ocfs2_cluster_from_desc() 191 /* given a cluster offset, calculate which block group it belongs to 197 * finds a problem. A caller that wants to check a group descriptor 206 * Read a group descriptor block into *bh. If *bh is NULL, a bh will be
|
H A D | stackglue.c | 314 const char *group, ocfs2_cluster_connect() 325 BUG_ON(group == NULL); ocfs2_cluster_connect() 347 strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1); ocfs2_cluster_connect() 383 int ocfs2_cluster_connect_agnostic(const char *group, ocfs2_cluster_connect_agnostic() argument 395 return ocfs2_cluster_connect(stack_name, NULL, 0, group, grouplen, ocfs2_cluster_connect_agnostic() 423 * Leave the group for this filesystem. This is executed by a userspace 426 static void ocfs2_leave_group(const char *group) ocfs2_leave_group() argument 434 argv[3] = (char *)group; ocfs2_leave_group() 458 void ocfs2_cluster_hangup(const char *group, int grouplen) ocfs2_cluster_hangup() argument 460 BUG_ON(group == NULL); ocfs2_cluster_hangup() 461 BUG_ON(group[grouplen] != '\0'); ocfs2_cluster_hangup() 463 ocfs2_leave_group(group); ocfs2_cluster_hangup() 311 ocfs2_cluster_connect(const char *stack_name, const char *cluster_name, int cluster_name_len, const char *group, int grouplen, struct ocfs2_locking_protocol *lproto, void (*recovery_handler)(int node_num, void *recovery_data), void *recovery_data, struct ocfs2_cluster_connection **conn) ocfs2_cluster_connect() argument
|
/linux-4.1.27/include/linux/mmc/ |
H A D | sd.h | 47 * [23:20] Function group 6 48 * [19:16] Function group 5 49 * [15:12] Function group 4 50 * [11:8] Function group 3 51 * [7:4] Function group 2 52 * [3:0] Function group 1
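Those nibble positions make group selection in the SWITCH_FUNC (CMD6) argument plain mask-and-shift work: group g occupies bits [4g-1:4g-4], and 0xF in a nibble means "leave this group's setting unchanged". A sketch of packing and unpacking the field:

#include <stdio.h>
#include <stdint.h>

/* Set function group 'group' (1..6) to function 'fn' in a CMD6 argument. */
static uint32_t set_group(uint32_t arg, int group, unsigned int fn)
{
    int shift = (group - 1) * 4;

    return (arg & ~(0xFu << shift)) | ((fn & 0xFu) << shift);
}

static unsigned int get_group(uint32_t arg, int group)
{
    return (arg >> ((group - 1) * 4)) & 0xF;
}

int main(void)
{
    uint32_t arg = 0x00FFFFFF;  /* all six groups: keep current setting */

    arg = set_group(arg, 1, 1); /* group 1: select function 1 */
    printf("arg=0x%08X group1=%u group6=%u\n",
           (unsigned)arg, get_group(arg, 1), get_group(arg, 6));
    return 0;
}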
|
/linux-4.1.27/drivers/pinctrl/ |
H A D | pinctrl-tz1090-pdc.c | 80 * struct tz1090_pdc_pingroup - TZ1090 PDC pin group 81 * @name: Name of pin group. 82 * @pins: Array of pin numbers in this pin group. 83 * @npins: Number of pins in this pin group. 90 * A representation of a group of pins (possibly just one pin) in the TZ1090 91 * PDC pin controller. Each group allows some parameter or parameters to be 132 /* Pin group pins */ 171 #define FUNCTION(mux, fname, group) \ 174 .groups = group##_groups, \ 175 .ngroups = ARRAY_SIZE(group##_groups), \ 186 * MUX_PG() - Initialise a pin group with mux control 187 * @pg_name: Pin group name (stringified, _pins appended to get pins array) 203 * DRV_PG() - Initialise a pin group with drive control 204 * @pg_name: Pin group name (stringified, _pins appended to get pins array) 263 unsigned int group) tz1090_pdc_pinctrl_get_group_name() 265 return tz1090_pdc_groups[group].name; tz1090_pdc_pinctrl_get_group_name() 269 unsigned int group, tz1090_pdc_pinctrl_get_group_pins() 273 *pins = tz1090_pdc_groups[group].pins; tz1090_pdc_pinctrl_get_group_pins() 274 *num_pins = tz1090_pdc_groups[group].npins; tz1090_pdc_pinctrl_get_group_pins() 314 unsigned int *num_maps, const char *group, add_map_mux() 321 (*map)[*num_maps].data.mux.group = group; add_map_mux() 329 * get_group_selector() - returns the group selector for a group 330 * @pin_group: the pin group to look up 333 * error message if the group isn't found or debug messages. 337 unsigned int group; get_group_selector() local 339 for (group = 0; group < ARRAY_SIZE(tz1090_pdc_groups); ++group) get_group_selector() 340 if (!strcmp(tz1090_pdc_groups[group].name, pin_group)) get_group_selector() 341 return group; get_group_selector() 349 const char *group, unsigned long *configs, add_map_configs() 369 if (get_group_selector(group) >= 0) add_map_configs() 374 (*map)[*num_maps].data.configs.group_or_pin = group; add_map_configs() 407 const char *group; tz1090_pdc_pinctrl_dt_subnode_to_map() local 438 of_property_for_each_string(np, "tz1090,pins", prop, group) { tz1090_pdc_pinctrl_dt_subnode_to_map() 441 group, function); tz1090_pdc_pinctrl_dt_subnode_to_map() 448 num_maps, group, configs, tz1090_pdc_pinctrl_dt_subnode_to_map() 529 * @grp: Pin mux group 552 unsigned int group) tz1090_pdc_pinctrl_set_mux() 555 const struct tz1090_pdc_pingroup *grp = &tz1090_pdc_groups[group]; tz1090_pdc_pinctrl_set_mux() 557 dev_dbg(pctldev->dev, "%s(func=%u (%s), group=%u (%s))\n", tz1090_pdc_pinctrl_set_mux() 560 group, tz1090_pdc_groups[group].name); tz1090_pdc_pinctrl_set_mux() 566 /* does this group even control the function? */ tz1090_pdc_pinctrl_set_mux() 583 unsigned int group; find_mux_group() local 586 for (group = 0; group < ARRAY_SIZE(tz1090_pdc_groups); ++group, ++grp) { find_mux_group() 779 "%s: group %s has no drive control\n", tz1090_pdc_pinconf_group_reg() 813 unsigned int group, tz1090_pdc_pinconf_group_get() 817 const struct tz1090_pdc_pingroup *g = &tz1090_pdc_groups[group]; tz1090_pdc_pinconf_group_get() 842 unsigned int group, tz1090_pdc_pinconf_group_set() 847 const struct tz1090_pdc_pingroup *g = &tz1090_pdc_groups[group]; tz1090_pdc_pinconf_group_set() 860 dev_dbg(pctldev->dev, "%s(group=%s, config=%#lx)\n", tz1090_pdc_pinconf_group_set() 870 * of a group, so do the pins one by one. 
This is tz1090_pdc_pinconf_group_set() 262 tz1090_pdc_pinctrl_get_group_name(struct pinctrl_dev *pctl, unsigned int group) tz1090_pdc_pinctrl_get_group_name() argument 268 tz1090_pdc_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, const unsigned int **pins, unsigned int *num_pins) tz1090_pdc_pinctrl_get_group_pins() argument 313 add_map_mux(struct pinctrl_map **map, unsigned int *reserved_maps, unsigned int *num_maps, const char *group, const char *function) add_map_mux() argument 346 add_map_configs(struct device *dev, struct pinctrl_map **map, unsigned int *reserved_maps, unsigned int *num_maps, const char *group, unsigned long *configs, unsigned int num_configs) add_map_configs() argument 550 tz1090_pdc_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned int function, unsigned int group) tz1090_pdc_pinctrl_set_mux() argument 812 tz1090_pdc_pinconf_group_get(struct pinctrl_dev *pctldev, unsigned int group, unsigned long *config) tz1090_pdc_pinconf_group_get() argument 841 tz1090_pdc_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned int group, unsigned long *configs, unsigned num_configs) tz1090_pdc_pinconf_group_set() argument
|
H A D | pinctrl-tz1090.c | 84 * A representation of a group of signals (possibly just one signal) in the 95 * struct tz1090_pingroup - TZ1090 pin group 96 * @name: Name of pin group. 97 * @pins: Array of pin numbers in this pin group. 98 * @npins: Number of pins in this pin group. 107 * A representation of a group of pins (possibly just one pin) in the TZ1090 108 * pin controller. Each group allows some parameter or parameters to be 339 /* Pins in each pin group */ 488 /* Pins in each drive pin group */ 543 /* individual pins not part of a pin mux group */ 693 #define FUNCTION(mux, fname, group) \ 696 .groups = group##_groups, \ 697 .ngroups = ARRAY_SIZE(group##_groups), \ 754 * DEFINE_SUBMUX() - Defines a submux description separate from a pin group. 791 * MUX_PG() - Initialise a pin group with mux control 792 * @pg_name: Pin group name (stringified, _pins appended to get pins array) 813 * SIMPLE_PG() - Initialise a simple convenience pin group 814 * @pg_name: Pin group name (stringified, _pins appended to get pins array) 816 * A simple pin group is simply used for binding pins together so they can be 828 * DRV_PG() - Initialise a pin group with drive control 829 * @pg_name: Pin group name (stringified, _pins appended to get pins array) 859 * group is below (tz1090_mux_pins). 886 * tz1090_init_mux_pins() - Initialise GPIO pin to mux group mapping. 889 * each pin mux group in tz1090_mux_groups[]. 910 * These are the externally visible pin groups. Some of them allow group control 914 * Pseudo pin groups follow in the group numbers after this array for each GPIO 915 * pin. Any group used for muxing must have all pins belonging to the same pin 916 * mux group. 932 * the pin group naming somewhat arbitrary) 992 unsigned int group) tz1090_pinctrl_get_group_name() 994 if (group < ARRAY_SIZE(tz1090_groups)) { tz1090_pinctrl_get_group_name() 996 return tz1090_groups[group].name; tz1090_pinctrl_get_group_name() 999 unsigned int pin = group - ARRAY_SIZE(tz1090_groups); tz1090_pinctrl_get_group_name() 1005 unsigned int group, tz1090_pinctrl_get_group_pins() 1009 if (group < ARRAY_SIZE(tz1090_groups)) { tz1090_pinctrl_get_group_pins() 1011 *pins = tz1090_groups[group].pins; tz1090_pinctrl_get_group_pins() 1012 *num_pins = tz1090_groups[group].npins; tz1090_pinctrl_get_group_pins() 1015 unsigned int pin = group - ARRAY_SIZE(tz1090_groups); tz1090_pinctrl_get_group_pins() 1058 unsigned int *num_maps, const char *group, add_map_mux() 1065 (*map)[*num_maps].data.mux.group = group; add_map_mux() 1075 const char *group, unsigned long *configs, add_map_configs() 1091 (*map)[*num_maps].data.configs.group_or_pin = group; add_map_configs() 1124 const char *group; tz1090_pinctrl_dt_subnode_to_map() local 1154 of_property_for_each_string(np, "tz1090,pins", prop, group) { tz1090_pinctrl_dt_subnode_to_map() 1157 group, function); tz1090_pinctrl_dt_subnode_to_map() 1164 num_maps, group, configs, tz1090_pinctrl_dt_subnode_to_map() 1353 * tz1090_pinctrl_enable_mux() - Switch a pin mux group to a function. 1358 * Enable a particular function on a pin mux group. Since pin mux descriptions 1406 * tz1090_pinctrl_enable() - Enable a function on a pin group. 1409 * @group: Group index to enable 1411 * Enable a particular function on a group of pins. The per GPIO pin pseudo pin 1413 * and if it belongs to a pin mux group the mux will be switched if it isn't 1416 * group. 1419 unsigned int function, unsigned int group) tz1090_pinctrl_set_mux() 1427 /* group of pins? 
*/ tz1090_pinctrl_set_mux() 1428 if (group < ARRAY_SIZE(tz1090_groups)) { tz1090_pinctrl_set_mux() 1429 grp = &tz1090_groups[group]; tz1090_pinctrl_set_mux() 1433 * All pins in the group must belong to the same mux group, tz1090_pinctrl_set_mux() 1434 * which allows us to just use the mux group of the first pin. tz1090_pinctrl_set_mux() 1439 pin_num = group - ARRAY_SIZE(tz1090_groups); tz1090_pinctrl_set_mux() 1445 /* no mux group, but can still be individually muxed to peripheral */ tz1090_pinctrl_set_mux() 1452 /* mux group already set to a different function? */ tz1090_pinctrl_set_mux() 1456 "%s: can't mux pin(s) to '%s', group already muxed to '%s'\n", tz1090_pinctrl_set_mux() 1465 /* if first pin in mux group to be enabled, enable the group mux */ tz1090_pinctrl_set_mux() 1774 "%s: group %s has no drive control\n", tz1090_pinconf_group_reg() 1803 unsigned int group, tz1090_pinconf_group_get() 1814 if (group >= ARRAY_SIZE(tz1090_groups)) { tz1090_pinconf_group_get() 1815 pin = group - ARRAY_SIZE(tz1090_groups); tz1090_pinconf_group_get() 1819 g = &tz1090_groups[group]; tz1090_pinconf_group_get() 1846 unsigned int group, unsigned long *configs, tz1090_pinconf_group_set() 1860 if (group >= ARRAY_SIZE(tz1090_groups)) { tz1090_pinconf_group_set() 1861 pin = group - ARRAY_SIZE(tz1090_groups); tz1090_pinconf_group_set() 1865 g = &tz1090_groups[group]; tz1090_pinconf_group_set() 1876 dev_dbg(pctldev->dev, "%s(group=%s, config=%#lx)\n", tz1090_pinconf_group_set() 1885 * of a group, so do the pins one by one. This is tz1090_pinconf_group_set() 991 tz1090_pinctrl_get_group_name(struct pinctrl_dev *pctldev, unsigned int group) tz1090_pinctrl_get_group_name() argument 1004 tz1090_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, const unsigned int **pins, unsigned int *num_pins) tz1090_pinctrl_get_group_pins() argument 1057 add_map_mux(struct pinctrl_map **map, unsigned int *reserved_maps, unsigned int *num_maps, const char *group, const char *function) add_map_mux() argument 1072 add_map_configs(struct device *dev, struct pinctrl_map **map, unsigned int *reserved_maps, unsigned int *num_maps, const char *group, unsigned long *configs, unsigned int num_configs) add_map_configs() argument 1418 tz1090_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned int function, unsigned int group) tz1090_pinctrl_set_mux() argument 1802 tz1090_pinconf_group_get(struct pinctrl_dev *pctldev, unsigned int group, unsigned long *config) tz1090_pinconf_group_get() argument 1845 tz1090_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned int group, unsigned long *configs, unsigned num_configs) tz1090_pinconf_group_set() argument
|
H A D | pinmux.c | 325 const char *group; pinmux_map_to_setting() local 350 "function %s can't be selected on any group\n", pinmux_map_to_setting() 354 if (map->data.mux.group) { pinmux_map_to_setting() 356 group = map->data.mux.group; pinmux_map_to_setting() 358 if (!strcmp(group, groups[i])) { pinmux_map_to_setting() 365 "invalid group \"%s\" for function \"%s\"\n", pinmux_map_to_setting() 366 group, map->data.mux.function); pinmux_map_to_setting() 370 group = groups[0]; pinmux_map_to_setting() 373 ret = pinctrl_get_group_selector(pctldev, group); pinmux_map_to_setting() 375 dev_err(pctldev->dev, "invalid group %s in map table\n", pinmux_map_to_setting() 376 map->data.mux.group); pinmux_map_to_setting() 379 setting->data.mux.group = ret; pinmux_map_to_setting() 401 ret = pctlops->get_group_pins(pctldev, setting->data.mux.group, pinmux_enable_setting() 409 setting->data.mux.group); pinmux_enable_setting() 411 "could not get pins for group %s\n", pinmux_enable_setting() 416 /* Try to allocate all pins in this group, one by one */ pinmux_enable_setting() 426 setting->data.mux.group); pinmux_enable_setting() 428 "could not request pin %d (%s) from group %s " pinmux_enable_setting() 449 setting->data.mux.group); pinmux_enable_setting() 481 ret = pctlops->get_group_pins(pctldev, setting->data.mux.group, pinmux_disable_setting() 488 setting->data.mux.group); pinmux_disable_setting() 490 "could not get pins for group %s\n", pinmux_disable_setting() 512 setting->data.mux.group); pinmux_disable_setting() 515 "deactivating group %s - it is already " pinmux_disable_setting() 603 seq_printf(s, " function %s group %s\n", pinmux_pins_show() 607 desc->mux_setting->group)); pinmux_pins_show() 619 seq_printf(s, "group %s\nfunction %s\n", pinmux_show_map() 620 map->data.mux.group ? map->data.mux.group : "(default)", pinmux_show_map() 631 seq_printf(s, "group: %s (%u) function: %s (%u)\n", pinmux_show_setting() 632 pctlops->get_group_name(pctldev, setting->data.mux.group), pinmux_show_setting() 633 setting->data.mux.group, pinmux_show_setting()
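The group-resolution step in pinmux_map_to_setting() above: ask the driver which groups the function can be muxed onto, then either validate the group the map names against that list or default to its first entry. A sketch of just that selection logic; the group names in main() are illustrative:

#include <stdio.h>
#include <string.h>

static const char *pick_group(const char *const *groups, int ngroups,
                              const char *requested)
{
    int i;

    if (!ngroups)
        return NULL;            /* function selectable on no group */
    if (!requested)
        return groups[0];       /* no group named: first legal one */
    for (i = 0; i < ngroups; i++)
        if (!strcmp(requested, groups[i]))
            return groups[i];   /* named group is legal for the function */
    return NULL;                /* invalid group for this function */
}

int main(void)
{
    const char *const i2c_groups[] = { "i2c0_grp", "i2c1_grp" };
    const char *g;

    printf("%s\n", pick_group(i2c_groups, 2, NULL));        /* i2c0_grp */
    printf("%s\n", pick_group(i2c_groups, 2, "i2c1_grp"));  /* i2c1_grp */
    g = pick_group(i2c_groups, 2, "spi0_grp");
    printf("%s\n", g ? g : "(invalid)");                    /* (invalid) */
    return 0;
}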
|
H A D | pinctrl-utils.c | 58 unsigned *num_maps, const char *group, pinctrl_utils_add_map_mux() 65 (*map)[*num_maps].data.mux.group = group; pinctrl_utils_add_map_mux() 75 unsigned *num_maps, const char *group, pinctrl_utils_add_map_configs() 92 (*map)[*num_maps].data.configs.group_or_pin = group; pinctrl_utils_add_map_configs() 56 pinctrl_utils_add_map_mux(struct pinctrl_dev *pctldev, struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps, const char *group, const char *function) pinctrl_utils_add_map_mux() argument 73 pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev, struct pinctrl_map **map, unsigned *reserved_maps, unsigned *num_maps, const char *group, unsigned long *configs, unsigned num_configs, enum pinctrl_map_type type) pinctrl_utils_add_map_configs() argument
|
H A D | pinctrl-lantiq.c | 78 const char *group, *pin; ltq_pinctrl_dt_subnode_to_map() local 96 of_property_for_each_string(np, "lantiq,groups", prop, group) { ltq_pinctrl_dt_subnode_to_map() 99 (*map)->data.mux.group = group; ltq_pinctrl_dt_subnode_to_map() 128 of_property_for_each_string(np, "lantiq,groups", prop, group) { ltq_pinctrl_dt_subnode_to_map() 133 (*map)->name = group; ltq_pinctrl_dt_subnode_to_map() 134 (*map)->data.configs.group_or_pin = group; ltq_pinctrl_dt_subnode_to_map() 262 unsigned group) ltq_pmx_set() 265 const struct ltq_pin_group *pin_grp = &info->grps[group]; ltq_pmx_set() 270 dev_err(info->dev, "Failed to set the pin group: %s\n", ltq_pmx_set() 271 info->grps[group].name); ltq_pmx_set() 260 ltq_pmx_set(struct pinctrl_dev *pctrldev, unsigned func, unsigned group) ltq_pmx_set() argument
|
H A D | pinctrl-tegra.c | 66 unsigned group) tegra_pinctrl_get_group_name() 70 return pmx->soc->groups[group].name; tegra_pinctrl_get_group_name() 74 unsigned group, tegra_pinctrl_get_group_pins() 80 *pins = pmx->soc->groups[group].pins; tegra_pinctrl_get_group_pins() 81 *num_pins = pmx->soc->groups[group].npins; tegra_pinctrl_get_group_pins() 132 const char *group; tegra_pinctrl_dt_subnode_to_map() local 175 of_property_for_each_string(np, "nvidia,pins", prop, group) { tegra_pinctrl_dt_subnode_to_map() 178 reserved_maps, num_maps, group, tegra_pinctrl_dt_subnode_to_map() 186 reserved_maps, num_maps, group, tegra_pinctrl_dt_subnode_to_map() 268 unsigned group) tegra_pinctrl_set_mux() 275 g = &pmx->soc->groups[group]; tegra_pinctrl_set_mux() 432 "Config param %04x (%s) not supported on group %s\n", tegra_pinconf_reg() 457 unsigned group, unsigned long *config) tegra_pinconf_group_get() 468 g = &pmx->soc->groups[group]; tegra_pinconf_group_get() 485 unsigned group, unsigned long *configs, tegra_pinconf_group_set() 497 g = &pmx->soc->groups[group]; tegra_pinconf_group_set() 556 struct seq_file *s, unsigned group) tegra_pinconf_group_dbg_show() 565 g = &pmx->soc->groups[group]; tegra_pinconf_group_dbg_show() 645 * Each mux group will appear in 4 functions' list of groups. tegra_pinctrl_probe() 65 tegra_pinctrl_get_group_name(struct pinctrl_dev *pctldev, unsigned group) tegra_pinctrl_get_group_name() argument 73 tegra_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, unsigned group, const unsigned **pins, unsigned *num_pins) tegra_pinctrl_get_group_pins() argument 266 tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned function, unsigned group) tegra_pinctrl_set_mux() argument 456 tegra_pinconf_group_get(struct pinctrl_dev *pctldev, unsigned group, unsigned long *config) tegra_pinconf_group_get() argument 484 tegra_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group, unsigned long *configs, unsigned num_configs) tegra_pinconf_group_set() argument 555 tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned group) tegra_pinconf_group_dbg_show() argument
|
H A D | pinctrl-adi2.h | 15 * struct adi_pin_group - describes a pin group 16 * @name: the name of this pin group
|
H A D | pinctrl-utils.h | 30 unsigned *num_maps, const char *group, 34 unsigned *num_maps, const char *group,
|
H A D | pinctrl-tegra.h | 80 * struct tegra_pingroup - Tegra pin group 81 * @name The name of the pin group. 82 * @pins An array of pin IDs included in this pin group. 84 * @funcs The mux functions which can be muxed onto this group. 118 * -1 in a *_reg field means that feature is unsupported for this group. 122 * A representation of a group of pins (possibly just one pin) in the Tegra 123 * pin controller. Each group allows some parameter or parameters to be
|
H A D | pinctrl-as3722.c | 182 unsigned group) as3722_pinctrl_get_group_name() 186 return as_pci->pin_groups[group].name; as3722_pinctrl_get_group_name() 190 unsigned group, const unsigned **pins, unsigned *num_pins) as3722_pinctrl_get_group_pins() 194 *pins = as_pci->pin_groups[group].pins; as3722_pinctrl_get_group_pins() 195 *num_pins = as_pci->pin_groups[group].npins; as3722_pinctrl_get_group_pins() 234 unsigned group) as3722_pinctrl_set() 237 int gpio_cntr_reg = AS3722_GPIOn_CONTROL_REG(group); as3722_pinctrl_set() 242 __func__, group, function, val); as3722_pinctrl_set() 248 group, ret); as3722_pinctrl_set() 251 as_pci->gpio_control[group].io_function = function; as3722_pinctrl_set() 263 group, ret); as3722_pinctrl_set() 266 as_pci->gpio_control[group].mode_prop = as3722_pinctrl_set() 181 as3722_pinctrl_get_group_name(struct pinctrl_dev *pctldev, unsigned group) as3722_pinctrl_get_group_name() argument 189 as3722_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, unsigned group, const unsigned **pins, unsigned *num_pins) as3722_pinctrl_get_group_pins() argument 233 as3722_pinctrl_set(struct pinctrl_dev *pctldev, unsigned function, unsigned group) as3722_pinctrl_set() argument
|
/linux-4.1.27/fs/ext3/ |
H A D | ialloc.c | 28 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap 31 * The file system contains group descriptors which are located after the 176 * Otherwise we simply return a random group. 180 * It's OK to put directory into a group unless 184 * Parent's group is preferred, if it doesn't satisfy these 186 * of the groups look good we just look for a group with more 187 * free inodes than average (starting at parent's group). 204 int group = -1, i; find_group_orlov() local 218 group = prandom_u32(); find_group_orlov() 219 parent_group = (unsigned)group % ngroups; find_group_orlov() 221 group = (parent_group + i) % ngroups; find_group_orlov() 222 desc = ext3_get_group_desc (sb, group, NULL); find_group_orlov() 231 best_group = group; find_group_orlov() 244 group = (parent_group + i) % ngroups; find_group_orlov() 245 desc = ext3_get_group_desc (sb, group, NULL); find_group_orlov() 254 return group; find_group_orlov() 259 group = (parent_group + i) % ngroups; find_group_orlov() 260 desc = ext3_get_group_desc (sb, group, NULL); find_group_orlov() 264 return group; find_group_orlov() 284 int group, i; find_group_other() local 289 group = parent_group; find_group_other() 290 desc = ext3_get_group_desc (sb, group, NULL); find_group_other() 293 return group; find_group_other() 304 group = (group + parent->i_ino) % ngroups; find_group_other() 307 * Use a quadratic hash to find a group with a free inode and some free find_group_other() 311 group += i; find_group_other() 312 if (group >= ngroups) find_group_other() 313 group -= ngroups; find_group_other() 314 desc = ext3_get_group_desc (sb, group, NULL); find_group_other() 317 return group; find_group_other() 321 * That failed: try linear search for a free inode, even if that group find_group_other() 324 group = parent_group; find_group_other() 326 if (++group >= ngroups) find_group_other() 327 group = 0; find_group_other() 328 desc = ext3_get_group_desc (sb, group, NULL); find_group_other() 330 return group; find_group_other() 338 * a directory, then a forward search is made for a block group with both 340 * the groups with above-average free space, that group with the fewest 344 * group to find a free inode. 352 int group; ext3_new_inode() local 377 group = find_group_orlov(sb, dir); ext3_new_inode() 379 group = find_group_other(sb, dir); ext3_new_inode() 382 if (group == -1) ext3_new_inode() 388 gdp = ext3_get_group_desc(sb, group, &bh2); ext3_new_inode() 393 bitmap_bh = read_inode_bitmap(sb, group); ext3_new_inode() 409 if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group), ext3_new_inode() 431 * group descriptor metadata has not yet been updated. ext3_new_inode() 434 if (++group == sbi->s_groups_count) ext3_new_inode() 435 group = 0; ext3_new_inode() 441 ino += group * EXT3_INODES_PER_GROUP(sb) + 1; ext3_new_inode() 445 "block_group = %d, inode=%lu", group, ino); ext3_new_inode() 453 spin_lock(sb_bgl_lock(sbi, group)); ext3_new_inode() 458 spin_unlock(sb_bgl_lock(sbi, group)); ext3_new_inode() 495 ei->i_block_group = group; ext3_new_inode() 671 printk("group %d: stored = %d, counted = %lu\n", ext3_count_free_inodes()
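find_group_other() above runs a three-stage search: try the parent's group, then probe with a quadratic hash (a doubling stride, wrapped modulo the group count), and finally fall back to a linear scan even if the chosen group has no free blocks. A compact standalone sketch of those stages, with a trivial has_free() predicate standing in for the group-descriptor checks:

        #include <stdio.h>

        #define NGROUPS 16

        /* Stand-in for "this group has free inodes and free blocks". */
        static int has_free(int group)
        {
                return group == 11;       /* pretend only group 11 has room */
        }

        static int find_group_other(int parent_group, unsigned long parent_ino)
        {
                int group = parent_group, i;

                if (has_free(group))                  /* stage 1: parent's group */
                        return group;

                group = (group + parent_ino) % NGROUPS;
                for (i = 1; i < NGROUPS; i <<= 1) {   /* stage 2: quadratic hash */
                        group += i;
                        if (group >= NGROUPS)
                                group -= NGROUPS;
                        if (has_free(group))
                                return group;
                }

                group = parent_group;                 /* stage 3: linear fallback */
                for (i = 0; i < NGROUPS; i++) {
                        if (++group >= NGROUPS)
                                group = 0;
                        if (has_free(group))
                                return group;
                }
                return -1;
        }

        int main(void)
        {
                printf("chosen group: %d\n", find_group_other(3, 1234));
                return 0;
        }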
|
H A D | resize.c | 27 unsigned group = input->group; verify_group_input() local 29 unsigned overhead = ext3_bg_has_super(sb, group) ? verify_group_input() 30 (1 + ext3_bg_num_gdb(sb, group) + verify_group_input() 41 printk(KERN_DEBUG "EXT3-fs: adding %s group %u: %u blocks " verify_group_input() 43 ext3_bg_has_super(sb, input->group) ? "normal" : verify_group_input() 44 "no-super", input->group, input->blocks_count, verify_group_input() 47 if (group != sbi->s_groups_count) verify_group_input() 49 "Cannot add at group %u (only %lu groups)", verify_group_input() 50 input->group, sbi->s_groups_count); verify_group_input() 53 ext3_warning(sb, __func__, "Last group not full"); verify_group_input() 66 "Block bitmap not in group (block %u)", verify_group_input() 70 "Inode bitmap not in group (block %u)", verify_group_input() 75 "Inode table not in group (blocks %u-"E3FSBLK")", verify_group_input() 182 * Set up the block and inode bitmaps, and the inode table for the new group. 192 ext3_fsblk_t start = ext3_group_first_block_no(sb, input->group); setup_new_group_blocks() 193 int reserved_gdb = ext3_bg_has_super(sb, input->group) ? setup_new_group_blocks() 195 unsigned long gdblocks = ext3_bg_num_gdb(sb, input->group); setup_new_group_blocks() 210 if (input->group != sbi->s_groups_count) { setup_new_group_blocks() 220 if (ext3_bg_has_super(sb, input->group)) { setup_new_group_blocks() 225 /* Copy all of the GDT blocks into the backup in this group */ setup_new_group_blocks() 230 ext3_debug("update backup group %#04lx (+%d)\n", block, bit); setup_new_group_blocks() 258 /* Zero out all of the reserved backup group descriptor table blocks */ setup_new_group_blocks() 350 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ... 383 * It is assumed that they are stored in group order. Returns the number of 415 * Called when we need to bring a reserved group descriptor table block into 419 * block, in group order. Even though we know all the block numbers we need, 433 unsigned long gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb); add_new_gdb() 444 "EXT3-fs: ext3_add_new_gdb: adding group block %lu\n", add_new_gdb() 479 "new group %u GDT block "E3FSBLK" not reserved", add_new_gdb() 480 input->group, gdblock); add_new_gdb() 567 * Called when we are adding a new group which has a backup copy of each of 568 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks. 650 * the new group to its reserved primary GDT block. reserve_backup_gdb() 652 blk = input->group * EXT3_BLOCKS_PER_GROUP(sb); reserve_backup_gdb() 685 * superblocks, and the location of the new group metadata in the GDT backups. 703 unsigned group; update_backups() local 710 group = 1; update_backups() 715 while ((group = ext3_list_backups(sb, &three, &five, &seven)) < last) { update_backups() 724 bh = sb_getblk(sb, group * bpg + blk_off); update_backups() 762 "can't update backup for group %d (err %d), " update_backups() 763 "forcing fsck on next reboot", group, err); update_backups() 770 /* Add group descriptor data to an existing or new group descriptor block. 776 * Otherwise, we may need to add backup GDT blocks for a sparse group. 779 * in the new group's counts to the superblock. Prior to that we have 780 * not really "added" the group at all. We re-check that we are still 781 * adding in the last group in case things have changed since verifying. 787 int reserved_gdb = ext3_bg_has_super(sb, input->group) ? 
ext3_group_add() 796 gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb); ext3_group_add() 797 gdb_off = input->group % EXT3_DESC_PER_BLOCK(sb); ext3_group_add() 842 * block. If we are adding a group past the last current GDT block, ext3_group_add() 844 * are adding a group with superblock/GDT backups we will also ext3_group_add() 848 ext3_bg_has_super(sb, input->group) ? ext3_group_add() 856 if (input->group != sbi->s_groups_count) { ext3_group_add() 867 * We will only either add reserved group blocks to a backup group ext3_group_add() 868 * or remove reserved blocks for the first group in a new group block. ext3_group_add() 877 if (reserved_gdb && ext3_bg_num_gdb(sb, input->group) && ext3_group_add() 884 * OK, now we've set up the new group. Time to make it active. ext3_group_add() 887 * so we have to be safe wrt. concurrent accesses the group ext3_group_add() 889 * group descriptor data etc. *before* we enable the group. ext3_group_add() 893 * group. ext3_group_add() 896 * group; then we update the total disk blocks count; then we ext3_group_add() 897 * update the groups count to enable the group; then finally we ext3_group_add() 902 /* Update group descriptor block for new group */ ext3_group_add() 913 * increasing the group count so that once the group is enabled, ext3_group_add() 916 * We always allocate group-by-group, then block-by-block or ext3_group_add() 917 * inode-by-inode within a group, so enabling these ext3_group_add() 918 * blocks/inodes before the group is live won't actually let us ext3_group_add() 940 * NB. These rules can be relaxed when checking the group count ext3_group_add() 942 * group after serialising against the group count, and we can ext3_group_add() 955 /* Update the reserved block counts only once the new group is ext3_group_add() 984 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>" 1008 printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK ext3_group_extend() 1031 /* Handle the remaining blocks in the last group only. */ ext3_group_extend() 1053 "will only finish group ("E3FSBLK ext3_group_extend() 1067 * one group descriptor via ext3_free_blocks(). ext3_group_extend() 1111 printk(KERN_DEBUG "EXT3-fs: extended group to %u blocks\n", ext3_group_extend()
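ext3_group_add() locates the descriptor for the new group by splitting the group number into a GDT block index (gdb_num) and an offset within that block (gdb_off); the arithmetic is just a div/mod pair. A quick illustration, assuming a hypothetical 4 KiB block size, which fits 128 of the 32-byte group descriptors per block:

        #include <stdio.h>

        #define DESC_PER_BLOCK 128   /* 4096-byte block / 32-byte descriptor */

        int main(void)
        {
                unsigned group = 300;                      /* group being added */
                unsigned gdb_num = group / DESC_PER_BLOCK; /* which GDT block   */
                unsigned gdb_off = group % DESC_PER_BLOCK; /* slot inside it    */

                printf("group %u -> GDT block %u, offset %u\n",
                       group, gdb_num, gdb_off);           /* block 2, offset 44 */
                return 0;
        }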
|
H A D | balloc.c | 24 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap 27 * The file system contains group descriptors which are located after the 37 * Calculate the block group number and offset, given a block number 52 * ext3_get_group_desc() -- load group descriptor from disk 54 * @block_group: given block group 56 * group descriptor 140 * @block_group: given block group 258 * @grp_goal: given goal block relative to the allocation block group 259 * @group: the current allocation block group 262 * Test if the given goal block (group relative) is within the file's 265 * If the reservation window is outside the goal allocation group, return 0; 273 unsigned int group, struct super_block * sb) goal_in_my_reservation() 277 group_first_block = ext3_group_first_block_no(sb, group); goal_in_my_reservation() 527 * Check to see if we are freeing blocks across a group ext3_free_blocks_sb() 655 /* And the group descriptor block */ ext3_free_blocks_sb() 656 BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); ext3_free_blocks_sb() 695 * @nr: given allocation block group 696 * @bh: bufferhead contains the bitmap of the given block group 732 * @start: the starting block (group relative) of the search 733 * @bh: bufferhead contains the block group bitmap 734 * @maxblocks: the ending block (group relative) of the reservation 764 * @start: the starting block (group relative) to find next 766 * @bh: bufferhead contains the block group bitmap 767 * @maxblocks: the ending block (group relative) for the search 822 * @lock: the spin lock for this block group 823 * @block: the free block (group relative) to allocate 824 * @bh: the buffer_head contains the block group bitmap 855 * @group: given allocation block group 857 * @grp_goal: given target block within the group 869 * the block group's last block. 876 ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, ext3_try_to_allocate() argument 886 group_first_block = ext3_group_first_block_no(sb, group); ext3_try_to_allocate() 890 /* reservation window cross group boundary */ ext3_try_to_allocate() 894 /* reservation window crosses group boundary */ ext3_try_to_allocate() 927 if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), ext3_try_to_allocate() 943 && claim_block(sb_bgl_lock(EXT3_SB(sb), group), ext3_try_to_allocate() 979 * group. The search will end when we found the start of next 1039 * have a reservation across the group boundary here find_next_reservable_window() 1079 * reservation list (the list that inside the group). We try to 1081 * or the beginning of the group, if there is no goal. 1085 * it. If there is no free block until the end of group, then the 1086 * whole group is full, we failed. Otherwise, check if the free 1097 * failed: we failed to find a reservation window in this group 1101 * @grp_goal: The goal (group-relative). It is where the search for a 1105 * of the group. 1108 * @group: the group we are trying to allocate in 1109 * @bitmap_bh: the block group block bitmap 1114 unsigned int group, struct buffer_head *bitmap_bh) alloc_new_reservation() 1124 group_first_block = ext3_group_first_block_no(sb, group); alloc_new_reservation() 1137 * if the old reservation is cross group boundary alloc_new_reservation() 1141 * that belongs to the next group. In this case, there is no alloc_new_reservation() 1143 * in this group(which will fail). we should alloc_new_reservation() 1147 * window to the first block of next group. 
alloc_new_reservation() 1289 * @group: given allocation block group 1291 * @grp_goal: given target block within the group 1316 unsigned int group, struct buffer_head *bitmap_bh, ext3_try_to_allocate_with_rsv() 1347 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, ext3_try_to_allocate_with_rsv() 1352 * grp_goal is a group relative block number (if there is a goal) ext3_try_to_allocate_with_rsv() 1355 * first block is the block number of the first block in this group ext3_try_to_allocate_with_rsv() 1357 group_first_block = ext3_group_first_block_no(sb, group); ext3_try_to_allocate_with_rsv() 1378 grp_goal, group, sb)) { ext3_try_to_allocate_with_rsv() 1382 group, bitmap_bh); ext3_try_to_allocate_with_rsv() 1387 grp_goal, group, sb)) ext3_try_to_allocate_with_rsv() 1403 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, ext3_try_to_allocate_with_rsv() 1481 * allocate block(s) from the block group contains the goal block first. If that 1499 ext3_grpblk_t free_blocks; /* number of free blocks in a group */ ext3_new_blocks() 1591 * group_no and gdp correctly point to the last group visited. ext3_new_blocks() 1602 * skip this group (and avoid loading bitmap) if there ext3_new_blocks() 1608 * skip this group if the number of ext3_new_blocks() 1620 * try to allocate block(s) from this group, without a goal(-1). ext3_new_blocks() 1649 ext3_debug("using block group %d(%d)\n", ext3_new_blocks() 1729 BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); ext3_new_blocks() 1775 * Adds up the number of free blocks from each block group. 1806 printk("group %d: stored = %d, counted = %lu\n", ext3_count_free_blocks() 1839 static int ext3_group_sparse(int group) ext3_group_sparse() argument 1841 if (group <= 1) ext3_group_sparse() 1843 if (!(group & 1)) ext3_group_sparse() 1845 return (test_root(group, 7) || test_root(group, 5) || ext3_group_sparse() 1846 test_root(group, 3)); ext3_group_sparse() 1850 * ext3_bg_has_super - number of blocks used by the superblock in group 1852 * @group: group number to check 1855 * in this group. Currently this will be only 0 or 1. 1857 int ext3_bg_has_super(struct super_block *sb, int group) ext3_bg_has_super() argument 1861 !ext3_group_sparse(group)) ext3_bg_has_super() 1866 static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group) ext3_bg_num_gdb_meta() argument 1868 unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb); ext3_bg_num_gdb_meta() 1872 if (group == first || group == first + 1 || group == last) ext3_bg_num_gdb_meta() 1877 static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group) ext3_bg_num_gdb_nometa() argument 1879 return ext3_bg_has_super(sb, group) ? EXT3_SB(sb)->s_gdb_count : 0; ext3_bg_num_gdb_nometa() 1883 * ext3_bg_num_gdb - number of blocks used by the group table in group 1885 * @group: group number to check 1887 * Return the number of blocks used by the group descriptor table 1888 * (primary or backup) in this group. In the future there may be a 1889 * different number of descriptor blocks in each group. 1891 unsigned long ext3_bg_num_gdb(struct super_block *sb, int group) ext3_bg_num_gdb() argument 1895 unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb); ext3_bg_num_gdb() 1899 return ext3_bg_num_gdb_nometa(sb,group); ext3_bg_num_gdb() 1901 return ext3_bg_num_gdb_meta(sb,group); ext3_bg_num_gdb() 1906 * ext3_trim_all_free -- function to trim all free space in alloc. 
group 1908 * @group: allocation group to trim 1909 * @start: first group block to examine 1910 * @max: last group block to examine 1911 * @gdp: allocation group description structure 1914 * ext3_trim_all_free walks through group's block bitmap searching for free 1918 * the extent in the block bitmap. This is done until whole group is scanned. 1921 unsigned int group, ext3_trim_all_free() 1934 * We will update one block bitmap, and one group descriptor ext3_trim_all_free() 1940 bitmap_bh = read_block_bitmap(sb, group); ext3_trim_all_free() 1951 gdp = ext3_get_group_desc(sb, group, &gdp_bh); ext3_trim_all_free() 1965 /* Walk through the whole group */ ext3_trim_all_free() 1977 && claim_block(sb_bgl_lock(sbi, group), ext3_trim_all_free() 1987 ext3_group_first_block_no(sb, group); ext3_trim_all_free() 1990 spin_lock(sb_bgl_lock(sbi, group)); ext3_trim_all_free() 1992 spin_unlock(sb_bgl_lock(sbi, group)); ext3_trim_all_free() 2013 if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group), ext3_trim_all_free() 2025 spin_lock(sb_bgl_lock(sbi, group)); ext3_trim_all_free() 2027 spin_unlock(sb_bgl_lock(sbi, group)); ext3_trim_all_free() 2056 /* And the group descriptor block */ ext3_trim_all_free() 2057 BUFFER_TRACE(gdp_bh, "dirtied group descriptor block"); ext3_trim_all_free() 2062 ext3_debug("trimmed %d blocks in the group %d\n", ext3_trim_all_free() 2063 count, group); ext3_trim_all_free() 2082 * start to start+len. For each such a group ext3_trim_all_free function 2088 unsigned long group, first_group, last_group; ext3_trim_fs() local 2114 /* Determine first and last group to examine based on start and len */ ext3_trim_fs() 2120 /* end now represents the last block to discard in this group */ ext3_trim_fs() 2123 for (group = first_group; group <= last_group; group++) { ext3_trim_fs() 2124 gdp = ext3_get_group_desc(sb, group, NULL); ext3_trim_fs() 2131 * change it for the last group, note that last_block is ext3_trim_fs() 2134 if (group == last_group) ext3_trim_fs() 2138 ret = ext3_trim_all_free(sb, group, first_block, ext3_trim_fs() 2146 * For every group except the first one, we are sure ext3_trim_fs() 272 goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal, unsigned int group, struct super_block * sb) goal_in_my_reservation() argument 1112 alloc_new_reservation(struct ext3_reserve_window_node *my_rsv, ext3_grpblk_t grp_goal, struct super_block *sb, unsigned int group, struct buffer_head *bitmap_bh) alloc_new_reservation() argument 1315 ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, unsigned int group, struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal, struct ext3_reserve_window_node * my_rsv, unsigned long *count, int *errp) ext3_try_to_allocate_with_rsv() argument 1920 ext3_trim_all_free(struct super_block *sb, unsigned int group, ext3_grpblk_t start, ext3_grpblk_t max, ext3_grpblk_t minblocks) ext3_trim_all_free() argument
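ext3_group_sparse() above encodes the sparse_super placement rule: only groups 0, 1 and powers of 3, 5 and 7 carry superblock/GDT backups, and test_root() checks the power-of-b condition by repeated multiplication. A standalone version of the same logic that prints which of the first 100 groups hold a backup:

        #include <stdio.h>

        /* True if a == b^k for some k >= 1 (same logic as ext3's test_root). */
        static int test_root(int a, int b)
        {
                int num = b;

                while (a > num)
                        num *= b;
                return num == a;
        }

        static int group_sparse(int group)
        {
                if (group <= 1)
                        return 1;
                if (!(group & 1))      /* even groups never have a backup */
                        return 0;
                return test_root(group, 7) || test_root(group, 5) ||
                       test_root(group, 3);
        }

        int main(void)
        {
                int g;

                /* prints: 0 1 3 5 7 9 25 27 49 81 */
                for (g = 0; g < 100; g++)
                        if (group_sparse(g))
                                printf("%d ", g);
                printf("\n");
                return 0;
        }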
|
/linux-4.1.27/drivers/pwm/ |
H A D | pwm-atmel-tcb.c | 70 unsigned group = pwm->hwpwm / 2; atmel_tcb_pwm_request() local 79 ret = clk_prepare_enable(tc->clk[group]); atmel_tcb_pwm_request() 92 cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR)); atmel_tcb_pwm_request() 100 __raw_readl(regs + ATMEL_TC_REG(group, RA)); atmel_tcb_pwm_request() 103 __raw_readl(regs + ATMEL_TC_REG(group, RB)); atmel_tcb_pwm_request() 106 tcbpwm->period = __raw_readl(regs + ATMEL_TC_REG(group, RC)); atmel_tcb_pwm_request() 113 __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR)); atmel_tcb_pwm_request() 138 unsigned group = pwm->hwpwm / 2; atmel_tcb_pwm_disable() local 155 cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR)); atmel_tcb_pwm_disable() 172 __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR)); atmel_tcb_pwm_disable() 176 * If both PWM devices in this group are disabled we stop the clock. atmel_tcb_pwm_disable() 180 regs + ATMEL_TC_REG(group, CCR)); atmel_tcb_pwm_disable() 183 ATMEL_TC_REG(group, CCR)); atmel_tcb_pwm_disable() 194 unsigned group = pwm->hwpwm / 2; atmel_tcb_pwm_enable() local 211 cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR)); atmel_tcb_pwm_enable() 254 __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR)); atmel_tcb_pwm_enable() 257 __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RA)); atmel_tcb_pwm_enable() 259 __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RB)); atmel_tcb_pwm_enable() 261 __raw_writel(tcbpwm->period, regs + ATMEL_TC_REG(group, RC)); atmel_tcb_pwm_enable() 265 regs + ATMEL_TC_REG(group, CCR)); atmel_tcb_pwm_enable() 275 unsigned group = pwm->hwpwm / 2; atmel_tcb_pwm_config() local 283 unsigned rate = clk_get_rate(tc->clk[group]); atmel_tcb_pwm_config() 327 * - group 0: PWM 0 & 1 atmel_tcb_pwm_config() 328 * - group 1: PWM 2 & 3 atmel_tcb_pwm_config() 329 * - group 2: PWM 4 & 5 atmel_tcb_pwm_config() 331 * PWM devices in a given group must be configured with the atmel_tcb_pwm_config() 335 * in this group before applying the new config. atmel_tcb_pwm_config() 341 "failed to configure period_ns: PWM group already configured with a different value\n"); atmel_tcb_pwm_config()
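Throughout the driver, group = pwm->hwpwm / 2 maps the six PWM devices onto three timer-counter channel groups (PWM 0 & 1 -> group 0, 2 & 3 -> group 1, 4 & 5 -> group 2), and the two devices in a group share the RC period register while each keeps its own duty register. A small demonstration of that mapping (the even->RA / odd->RB split is inferred from the paired RA/RB writes above):

        #include <stdio.h>

        int main(void)
        {
                unsigned hwpwm;

                for (hwpwm = 0; hwpwm < 6; hwpwm++) {
                        unsigned group = hwpwm / 2;   /* shared TC channel */
                        const char *duty_reg = (hwpwm % 2) ? "RB" : "RA";

                        printf("pwm %u -> group %u, duty in %s, period in RC\n",
                               hwpwm, group, duty_reg);
                }
                return 0;
        }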
|
/linux-4.1.27/drivers/pinctrl/spear/ |
H A D | pinctrl-spear.c | 99 int i, j, group; pmx_init_addr() local 101 for (group = 0; group < machdata->ngroups; group++) { pmx_init_addr() 102 pgroup = machdata->groups[group]; pmx_init_addr() 122 unsigned group) spear_pinctrl_get_group_name() 126 return pmx->machdata->groups[group]->name; spear_pinctrl_get_group_name() 130 unsigned group, const unsigned **pins, unsigned *num_pins) spear_pinctrl_get_group_pins() 134 *pins = pmx->machdata->groups[group]->pins; spear_pinctrl_get_group_pins() 135 *num_pins = pmx->machdata->groups[group]->npins; spear_pinctrl_get_group_pins() 154 const char *function, *group; spear_pinctrl_dt_node_to_map() local 181 of_property_for_each_string(np, "st,pins", prop, group) { for_each_child_of_node() 183 (*map)[index].data.mux.group = group; for_each_child_of_node() 238 unsigned function, unsigned group, bool enable) spear_pinctrl_endisable() 246 pgroup = pmx->machdata->groups[group]; spear_pinctrl_endisable() 263 dev_err(pmx->dev, "pinmux group: %s not supported\n", spear_pinctrl_endisable() 272 unsigned group) spear_pinctrl_set_mux() 274 return spear_pinctrl_endisable(pctldev, function, group, true); spear_pinctrl_set_mux() 307 * Some SoC have configuration options applicable to group of pins, gpio_request_endisable() 121 spear_pinctrl_get_group_name(struct pinctrl_dev *pctldev, unsigned group) spear_pinctrl_get_group_name() argument 129 spear_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, unsigned group, const unsigned **pins, unsigned *num_pins) spear_pinctrl_get_group_pins() argument 237 spear_pinctrl_endisable(struct pinctrl_dev *pctldev, unsigned function, unsigned group, bool enable) spear_pinctrl_endisable() argument 271 spear_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned function, unsigned group) spear_pinctrl_set_mux() argument
|
/linux-4.1.27/tools/perf/tests/ |
H A D | thread-mg-share.c | 12 /* thread group */ test__thread_mg_share() 26 * thread group (pid: 0, tids: 0, 1, 2, 3) test__thread_mg_share() 27 * other group (pid: 4, tids: 4, 5) test__thread_mg_share() 66 /* release thread group */ test__thread_mg_share() 78 /* release other group */ test__thread_mg_share()
|
H A D | attr.py | 74 self.group = '' 207 # For each defined group in the expected events 208 # check we match the same group in the result. 210 group = exp_event.group 212 if (group == ''): 216 res_group = result[res_name].group 217 if res_group not in match[group]: 218 raise Fail(self, 'group failure') 220 log.debug(" group: [%s] matches group leader %s" % 221 (exp_name, str(match[group]))) 233 event.group = iname 234 log.debug('[%s] has group leader [%s]' % (name, iname))
|
/linux-4.1.27/include/net/ |
H A D | genetlink.h | 11 * struct genl_multicast_group - generic netlink multicast group 12 * @name: name of the multicast group, names are per-family 36 * @mcast_bind: a socket bound to the given multicast group (which 38 * @mcast_unbind: a socket was unbound from the given multicast group. 46 * @mcgrp_offset: starting number of multicast group IDs in this family 64 int (*mcast_bind)(struct net *net, int group); 65 void (*mcast_unbind)(struct net *net, int group); 188 u32 group, struct nlmsghdr *nlh, gfp_t flags); 290 * @group: offset of multicast group in groups array 295 u32 portid, unsigned int group, gfp_t flags) genlmsg_multicast_netns() 297 if (WARN_ON_ONCE(group >= family->n_mcgrps)) genlmsg_multicast_netns() 299 group = family->mcgrp_offset + group; genlmsg_multicast_netns() 300 return nlmsg_multicast(net->genl_sock, skb, portid, group, flags); genlmsg_multicast_netns() 308 * @group: offset of multicast group in groups array 313 unsigned int group, gfp_t flags) genlmsg_multicast() 316 portid, group, flags); genlmsg_multicast() 324 * @group: offset of multicast group in groups array 331 unsigned int group, gfp_t flags); 406 * @group: the broadcast group that will notice the error 407 * (this is the offset of the multicast group in the groups array) 414 u32 portid, u32 group, int code) genl_set_err() 416 if (WARN_ON_ONCE(group >= family->n_mcgrps)) genl_set_err() 418 group = family->mcgrp_offset + group; genl_set_err() 419 return netlink_set_err(net->genl_sock, portid, group, code); genl_set_err() 423 struct net *net, unsigned int group) genl_has_listeners() 425 if (WARN_ON_ONCE(group >= family->n_mcgrps)) genl_has_listeners() 427 group = family->mcgrp_offset + group; genl_has_listeners() 428 return netlink_has_listeners(net->genl_sock, group); genl_has_listeners() 293 genlmsg_multicast_netns(struct genl_family *family, struct net *net, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) genlmsg_multicast_netns() argument 311 genlmsg_multicast(struct genl_family *family, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) genlmsg_multicast() argument 413 genl_set_err(struct genl_family *family, struct net *net, u32 portid, u32 group, int code) genl_set_err() argument 422 genl_has_listeners(struct genl_family *family, struct net *net, unsigned int group) genl_has_listeners() argument
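All the multicast helpers above normalise the caller's group the same way: the caller passes an offset into the family's own groups array, the helper range-checks it against n_mcgrps, then adds mcgrp_offset to obtain the global netlink group ID. A sketch of just that translation step (the two struct fields mirror the header; the rest is illustrative):

        #include <stdio.h>

        struct family {                 /* the two fields the helpers rely on  */
                unsigned mcgrp_offset;  /* first global group ID of the family */
                unsigned n_mcgrps;      /* number of groups the family defined */
        };

        /* Returns the global group ID, or -1 for an out-of-range offset
         * (the kernel helpers WARN_ON_ONCE and return an error instead). */
        static int to_global_group(const struct family *f, unsigned group)
        {
                if (group >= f->n_mcgrps)
                        return -1;
                return f->mcgrp_offset + group;
        }

        int main(void)
        {
                struct family f = { .mcgrp_offset = 16, .n_mcgrps = 3 };

                printf("offset 2 -> global %d\n", to_global_group(&f, 2)); /* 18 */
                printf("offset 5 -> global %d\n", to_global_group(&f, 5)); /* -1 */
                return 0;
        }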
|
/linux-4.1.27/drivers/pinctrl/freescale/ |
H A D | pinctrl-mxs.c | 44 unsigned group) mxs_get_group_name() 48 return d->soc->groups[group].name; mxs_get_group_name() 51 static int mxs_get_group_pins(struct pinctrl_dev *pctldev, unsigned group, mxs_get_group_pins() argument 56 *pins = d->soc->groups[group].pins; mxs_get_group_pins() 57 *num_pins = d->soc->groups[group].npins; mxs_get_group_pins() 73 char *group = NULL; mxs_dt_node_to_map() local 96 /* Check for group node which has both mux and config settings */ mxs_dt_node_to_map() 108 /* Compose group name */ mxs_dt_node_to_map() 109 group = kzalloc(length, GFP_KERNEL); mxs_dt_node_to_map() 110 if (!group) { mxs_dt_node_to_map() 114 snprintf(group, length, "%s.%d", np->name, reg); mxs_dt_node_to_map() 115 new_map[i].data.mux.group = group; mxs_dt_node_to_map() 128 group; mxs_dt_node_to_map() 140 kfree(group); mxs_dt_node_to_map() 153 kfree(map[i].data.mux.group); mxs_dt_free_map() 186 unsigned group, mxs_pinctrl_get_func_groups() 192 *groups = d->soc->functions[group].groups; mxs_pinctrl_get_func_groups() 193 *num_groups = d->soc->functions[group].ngroups; mxs_pinctrl_get_func_groups() 199 unsigned group) mxs_pinctrl_set_mux() 202 struct mxs_group *g = &d->soc->groups[group]; mxs_pinctrl_set_mux() 243 unsigned group, unsigned long *config) mxs_pinconf_group_get() 247 *config = d->soc->groups[group].config; mxs_pinconf_group_get() 253 unsigned group, unsigned long *configs, mxs_pinconf_group_set() 257 struct mxs_group *g = &d->soc->groups[group]; mxs_pinconf_group_set() 323 struct seq_file *s, unsigned group) mxs_pinconf_group_dbg_show() 327 if (!mxs_pinconf_group_get(pctldev, group, &config)) mxs_pinconf_group_dbg_show() 355 char *group; mxs_pinctrl_parse_group() local 359 group = devm_kzalloc(&pdev->dev, length, GFP_KERNEL); mxs_pinctrl_parse_group() 360 if (!group) mxs_pinctrl_parse_group() 363 snprintf(group, length, "%s", np->name); mxs_pinctrl_parse_group() 365 snprintf(group, length, "%s.%d", np->name, val); mxs_pinctrl_parse_group() 366 g->name = group; mxs_pinctrl_parse_group() 410 dev_err(&pdev->dev, "no group is defined\n"); mxs_pinctrl_probe_dt() 43 mxs_get_group_name(struct pinctrl_dev *pctldev, unsigned group) mxs_get_group_name() argument 185 mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, unsigned group, const char * const **groups, unsigned * const num_groups) mxs_pinctrl_get_func_groups() argument 198 mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, unsigned group) mxs_pinctrl_set_mux() argument 242 mxs_pinconf_group_get(struct pinctrl_dev *pctldev, unsigned group, unsigned long *config) mxs_pinconf_group_get() argument 252 mxs_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group, unsigned long *configs, unsigned num_configs) mxs_pinconf_group_set() argument 322 mxs_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned group) mxs_pinconf_group_dbg_show() argument
|
H A D | pinctrl-imx1.h | 33 * struct imx1_pin_group - describes an IMX pin group 34 * @name: the name of this specific pin group 35 * @pins: an array of imx1_pin structs used in this group 36 * @npins: the number of pins in this group array, i.e. the number of
|
H A D | pinctrl-imx.h | 38 * struct imx_pin_group - describes an IMX pin group 39 * @name: the name of this specific pin group 40 * @npins: the number of pins in this group array, i.e. the number of
|
H A D | pinctrl-imx.c | 114 * first find the group of this node and check if we need create imx_dt_node_to_map() 119 dev_err(info->dev, "unable to find group for node %s\n", imx_dt_node_to_map() 144 new_map[0].data.mux.group = np->name; imx_dt_node_to_map() 160 dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", imx_dt_node_to_map() 161 (*map)->data.mux.function, (*map)->data.mux.group, map_num); imx_dt_node_to_map() 183 unsigned group) imx_pmx_set() 193 * Configure the mux mode for each pin in the group for a specific imx_pmx_set() 196 grp = &info->groups[group]; imx_pmx_set() 199 dev_dbg(ipctl->dev, "enable function %s group %s\n", imx_pmx_set() 305 unsigned int pin, group; imx_pmx_gpio_request_enable() local 317 for (group = 0; group < info->ngroups; group++) { imx_pmx_gpio_request_enable() 318 grp = &info->groups[group]; imx_pmx_gpio_request_enable() 450 struct seq_file *s, unsigned group) imx_pinconf_group_dbg_show() 459 if (group > info->ngroups) imx_pinconf_group_dbg_show() 463 grp = &info->groups[group]; imx_pinconf_group_dbg_show() 505 dev_dbg(info->dev, "group(%d): %s\n", index, np->name); imx_pinctrl_parse_groups() 511 /* Initialise group */ imx_pinctrl_parse_groups() 182 imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, unsigned group) imx_pmx_set() argument 449 imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned group) imx_pinconf_group_dbg_show() argument
|
/linux-4.1.27/kernel/ |
H A D | uid16.c | 19 SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group) SYSCALL_DEFINE3() 21 return sys_chown(filename, low2highuid(user), low2highgid(group)); SYSCALL_DEFINE3() 24 SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group) SYSCALL_DEFINE3() 26 return sys_lchown(filename, low2highuid(user), low2highgid(group)); SYSCALL_DEFINE3() 29 SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group) SYSCALL_DEFINE3() 31 return sys_fchown(fd, low2highuid(user), low2highgid(group)); SYSCALL_DEFINE3() 116 old_gid_t group; groups16_to_user() local 121 group = high2lowgid(from_kgid_munged(user_ns, kgid)); groups16_to_user() 122 if (put_user(group, grouplist+i)) groups16_to_user() 134 old_gid_t group; groups16_from_user() local 138 if (get_user(group, grouplist+i)) groups16_from_user() 141 kgid = make_kgid(user_ns, low2highgid(group)); groups16_from_user()
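The 16-bit syscalls above convert between the legacy old_gid_t and the kernel's wider gid_t on every call, and the conversion has two quirks: -1 must stay -1 going up (it is the "leave unchanged" sentinel), while values that do not fit in 16 bits are squashed to an overflow ID going down. A hedged sketch of both directions; 65534 matches the usual overflowgid default, but the real value is a sysctl:

        #include <stdio.h>
        #include <stdint.h>

        typedef uint16_t old_gid_t;
        typedef uint32_t kgid32_t;   /* avoid clashing with the libc gid_t */

        #define OVERFLOWGID 65534    /* usual /proc/sys/kernel/overflowgid */

        static kgid32_t low2highgid(old_gid_t gid)
        {
                /* -1 is the "leave unchanged" sentinel; it widens to -1 */
                return gid == (old_gid_t)-1 ? (kgid32_t)-1 : (kgid32_t)gid;
        }

        static old_gid_t high2lowgid(kgid32_t gid)
        {
                /* IDs that don't fit in 16 bits collapse to the overflow gid */
                return (gid & ~0xFFFFu) ? (old_gid_t)OVERFLOWGID
                                        : (old_gid_t)gid;
        }

        int main(void)
        {
                printf("low2highgid(0xFFFF) = %u\n", low2highgid(0xFFFF));
                printf("high2lowgid(100000) = %u\n", high2lowgid(100000));
                printf("high2lowgid(1000)   = %u\n", high2lowgid(1000));
                return 0;
        }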
|
H A D | groups.c | 2 * Supplementary group IDs 155 * set_groups - Change a group subscription in a set of credentials 157 * @group_info: The group list to install 170 * set_current_groups - Change current's group subscription 171 * @group_info: The group list to impose 173 * Validate a group subscription and, if valid, impose it upon current's task 253 * Check whether we're fsgid/egid or in the supplemental group..
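groups.c keeps the supplementary list sorted so that membership tests (the "fsgid/egid or in the supplemental group" check above) can binary-search it rather than scan. A minimal sketch of that search over a sorted array of group IDs, in the spirit of groups_search():

        #include <stdio.h>

        /* Binary search over a sorted group list. */
        static int in_group_list(const unsigned *groups, int ngroups,
                                 unsigned gid)
        {
                int left = 0, right = ngroups;

                while (left < right) {
                        int mid = (left + right) / 2;

                        if (gid > groups[mid])
                                left = mid + 1;
                        else if (gid < groups[mid])
                                right = mid;
                        else
                                return 1;
                }
                return 0;
        }

        int main(void)
        {
                const unsigned groups[] = { 4, 20, 24, 27, 100 };  /* sorted */

                printf("in 24? %d\n", in_group_list(groups, 5, 24));  /* 1 */
                printf("in 25? %d\n", in_group_list(groups, 5, 25));  /* 0 */
                return 0;
        }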
|
/linux-4.1.27/drivers/usb/gadget/function/ |
H A D | uvc_configfs.c | 144 static struct config_item *uvcg_control_header_make(struct config_group *group, uvcg_control_header_make() argument 164 static void uvcg_control_header_drop(struct config_group *group, uvcg_control_header_drop() argument 174 struct config_group group; member in struct:uvcg_control_header_grp 189 struct config_group group; member in struct:uvcg_default_processing 196 struct uvcg_default_processing, group); to_uvcg_default_processing() 212 struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex; \ 218 opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent; \ 250 struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex; uvcg_default_processing_bm_controls_show() 257 opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent; uvcg_default_processing_bm_controls_show() 296 &uvcg_default_processing.group, 302 struct config_group group; member in struct:uvcg_processing_grp 311 struct config_group group; member in struct:uvcg_default_camera 318 struct uvcg_default_camera, group); to_uvcg_default_camera() 334 struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \ 340 opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent-> \ 380 struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; uvcg_default_camera_bm_controls_show() 387 opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent-> uvcg_default_camera_bm_controls_show() 428 &uvcg_default_camera.group, 434 struct config_group group; member in struct:uvcg_camera_grp 443 struct config_group group; member in struct:uvcg_default_output 450 struct uvcg_default_output, group); to_uvcg_default_output() 466 struct mutex *su_mutex = &dout->group.cg_subsys->su_mutex; \ 472 opts_item = dout->group.cg_item.ci_parent->ci_parent-> \ 520 &uvcg_default_output.group, 526 struct config_group group; member in struct:uvcg_output_grp 534 &uvcg_camera_grp.group, 535 &uvcg_output_grp.group, 541 struct config_group group; member in struct:uvcg_terminal_grp 550 struct config_group group; member in struct:uvcg_control_class 558 struct uvcg_control_class, group); uvcg_get_ctl_class_arr() 662 &uvcg_control_class_fs.group, 663 &uvcg_control_class_ss.group, 669 struct config_group group; member in struct:uvcg_control_class_grp 677 &uvcg_control_header_grp.group, 678 &uvcg_processing_grp.group, 679 &uvcg_terminal_grp.group, 680 &uvcg_control_class_grp.group, 686 struct config_group group; member in struct:uvcg_control_grp 695 struct config_group group; member in struct:uvcg_uncompressed_grp 700 struct config_group group; member in struct:uvcg_mjpeg_grp 704 &uvcg_uncompressed_grp.group.cg_item, 705 &uvcg_mjpeg_grp.group.cg_item, 714 struct config_group group; member in struct:uvcg_format 723 return container_of(to_config_group(item), struct uvcg_format, group); to_uvcg_format() 730 struct mutex *su_mutex = &f->group.cg_subsys->su_mutex; uvcg_format_bma_controls_show() 736 opts_item = f->group.cg_item.ci_parent->ci_parent->ci_parent; uvcg_format_bma_controls_show() 757 struct mutex *su_mutex = &ch->group.cg_subsys->su_mutex; uvcg_format_bma_controls_store() 762 opts_item = ch->group.cg_item.ci_parent->ci_parent->ci_parent; uvcg_format_bma_controls_store() 837 group); uvcg_streaming_header_allow_link() 876 group); uvcg_streaming_header_drop_link() 957 *uvcg_streaming_header_make(struct config_group *group, const char *name) uvcg_streaming_header_make() argument 976 static void uvcg_streaming_header_drop(struct config_group *group, uvcg_streaming_header_drop() argument 986 struct config_group group; member in 
struct:uvcg_streaming_header_grp 1261 static struct config_item *uvcg_frame_make(struct config_group *group, uvcg_frame_make() argument 1282 opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; uvcg_frame_make() 1286 fmt = to_uvcg_format(&group->cg_item); uvcg_frame_make() 1306 static void uvcg_frame_drop(struct config_group *group, struct config_item *item) uvcg_frame_drop() argument 1313 opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; uvcg_frame_drop() 1317 fmt = to_uvcg_format(&group->cg_item); uvcg_frame_drop() 1332 container_of(to_config_group(item), struct uvcg_format, group), to_uvcg_uncompressed() 1354 struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex; uvcg_uncompressed_guid_format_show() 1358 opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent; uvcg_uncompressed_guid_format_show() 1375 struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex; uvcg_uncompressed_guid_format_store() 1380 opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent; uvcg_uncompressed_guid_format_store() 1411 struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ 1416 opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ 1437 struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ 1442 opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ 1459 struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ 1465 opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ 1546 static struct config_group *uvcg_uncompressed_make(struct config_group *group, uvcg_uncompressed_make() argument 1571 config_group_init_type_name(&h->fmt.group, name, uvcg_uncompressed_make() 1574 return &h->fmt.group; uvcg_uncompressed_make() 1577 static void uvcg_uncompressed_drop(struct config_group *group, uvcg_uncompressed_drop() argument 1604 container_of(to_config_group(item), struct uvcg_format, group), to_uvcg_mjpeg() 1626 struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ 1631 opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ 1651 struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ 1656 opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ 1673 struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ 1679 opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ 1759 static struct config_group *uvcg_mjpeg_make(struct config_group *group, uvcg_mjpeg_make() argument 1778 config_group_init_type_name(&h->fmt.group, name, uvcg_mjpeg_make() 1781 return &h->fmt.group; uvcg_mjpeg_make() 1784 static void uvcg_mjpeg_drop(struct config_group *group, uvcg_mjpeg_drop() argument 1804 struct config_group group; member in struct:uvcg_default_color_matching 1811 struct uvcg_default_color_matching, group); to_uvcg_default_color_matching() 1827 struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \ 1833 opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent; \ 1878 &uvcg_default_color_matching.group, 1884 struct config_group group; member in struct:uvcg_color_matching_grp 1893 struct config_group group; member in struct:uvcg_streaming_class 1901 struct uvcg_streaming_class, group); __uvcg_get_stream_class_arr() 1961 grp = &f->fmt->group; __uvcg_iter_strm_cls() 2243 &uvcg_streaming_class_fs.group, 2244 &uvcg_streaming_class_hs.group, 2245 &uvcg_streaming_class_ss.group, 2251 struct config_group group; member in struct:uvcg_streaming_class_grp 2259 &uvcg_streaming_header_grp.group, 2260 &uvcg_uncompressed_grp.group, 2261 &uvcg_mjpeg_grp.group, 2262 
&uvcg_color_matching_grp.group, 2263 &uvcg_streaming_class_grp.group, 2269 struct config_group group; member in struct:uvcg_streaming_grp 2277 &uvcg_control_grp.group, 2278 &uvcg_streaming_grp.group, 2285 func_inst.group); to_f_uvc_opts() 2388 config_group_init_type_name(&uvcg_control_header_grp.group, uvcg_attach_configfs() 2391 config_group_init_type_name(&uvcg_default_processing.group, uvcg_attach_configfs() 2394 uvcg_init_group(&uvcg_processing_grp.group, uvcg_attach_configfs() 2398 config_group_init_type_name(&uvcg_default_camera.group, uvcg_attach_configfs() 2401 uvcg_init_group(&uvcg_camera_grp.group, uvcg_attach_configfs() 2405 config_group_init_type_name(&uvcg_default_output.group, uvcg_attach_configfs() 2408 uvcg_init_group(&uvcg_output_grp.group, uvcg_attach_configfs() 2412 uvcg_init_group(&uvcg_terminal_grp.group, uvcg_attach_configfs() 2416 config_group_init_type_name(&uvcg_control_class_fs.group, uvcg_attach_configfs() 2419 config_group_init_type_name(&uvcg_control_class_ss.group, uvcg_attach_configfs() 2422 uvcg_init_group(&uvcg_control_class_grp.group, uvcg_attach_configfs() 2426 uvcg_init_group(&uvcg_control_grp.group, uvcg_attach_configfs() 2430 config_group_init_type_name(&uvcg_streaming_header_grp.group, uvcg_attach_configfs() 2433 config_group_init_type_name(&uvcg_uncompressed_grp.group, uvcg_attach_configfs() 2436 config_group_init_type_name(&uvcg_mjpeg_grp.group, uvcg_attach_configfs() 2439 config_group_init_type_name(&uvcg_default_color_matching.group, uvcg_attach_configfs() 2442 uvcg_init_group(&uvcg_color_matching_grp.group, uvcg_attach_configfs() 2446 config_group_init_type_name(&uvcg_streaming_class_fs.group, uvcg_attach_configfs() 2449 config_group_init_type_name(&uvcg_streaming_class_hs.group, uvcg_attach_configfs() 2452 config_group_init_type_name(&uvcg_streaming_class_ss.group, uvcg_attach_configfs() 2455 uvcg_init_group(&uvcg_streaming_class_grp.group, uvcg_attach_configfs() 2459 uvcg_init_group(&uvcg_streaming_grp.group, uvcg_attach_configfs() 2463 uvcg_init_group(&opts->func_inst.group, uvcg_attach_configfs()
|
/linux-4.1.27/drivers/gpu/drm/rcar-du/ |
H A D | rcar_du_group.h | 24 * struct rcar_du_group - CRTCs and planes group 27 * @index: group index 28 * @use_count: number of users of the group (rcar_du_group_(get|put)) 31 * @planes: planes handled by the group
|
H A D | rcar_du_crtc.c | 34 struct rcar_du_device *rcdu = rcrtc->group->dev; rcar_du_crtc_read() 41 struct rcar_du_device *rcdu = rcrtc->group->dev; rcar_du_crtc_write() 48 struct rcar_du_device *rcdu = rcrtc->group->dev; rcar_du_crtc_clr() 56 struct rcar_du_device *rcdu = rcrtc->group->dev; rcar_du_crtc_set() 65 struct rcar_du_device *rcdu = rcrtc->group->dev; rcar_du_crtc_clr_set() 83 ret = rcar_du_group_get(rcrtc->group); rcar_du_crtc_get() 98 rcar_du_group_put(rcrtc->group); rcar_du_crtc_put() 140 dev_dbg(rcrtc->group->dev->dev, rcar_du_crtc_set_display_timing() 146 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR, rcar_du_crtc_set_display_timing() 148 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0); rcar_du_crtc_set_display_timing() 182 struct rcar_du_device *rcdu = rcrtc->group->dev; rcar_du_crtc_route_output() 216 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) { rcar_du_crtc_update_planes() 217 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i]; rcar_du_crtc_update_planes() 263 mutex_lock(&rcrtc->group->lock); rcar_du_crtc_update_planes() 264 if (rcar_du_group_read(rcrtc->group, DPTSR) != dptsr) { rcar_du_crtc_update_planes() 265 rcar_du_group_write(rcrtc->group, DPTSR, dptsr); rcar_du_crtc_update_planes() 266 if (rcrtc->group->used_crtcs) rcar_du_crtc_update_planes() 267 rcar_du_group_restart(rcrtc->group); rcar_du_crtc_update_planes() 269 mutex_unlock(&rcrtc->group->lock); rcar_du_crtc_update_planes() 272 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, rcar_du_crtc_update_planes() 337 struct rcar_du_device *rcdu = rcrtc->group->dev; rcar_du_crtc_wait_page_flip() 367 rcar_du_group_set_routing(rcrtc->group); rcar_du_crtc_start() 370 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); rcar_du_crtc_start() 381 rcar_du_group_start_stop(rcrtc->group, true); rcar_du_crtc_start() 408 rcar_du_group_start_stop(rcrtc->group, false); rcar_du_crtc_stop() 430 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) { rcar_du_crtc_resume() 431 struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i]; rcar_du_crtc_resume() 589 rcrtc->group = rgrp; rcar_du_crtc_create()
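Each group serves two CRTCs, so group-level registers such as ESCR/ESCR2 and DS1PR/DS2PR exist in pairs, and a CRTC selects its copy with "rcrtc->index % 2", as seen several times above. A tiny illustration of that selection (the register offsets below are made up for the demo, not the real map):

        #include <stdio.h>

        #define ESCR  0x1000    /* illustrative offsets only */
        #define ESCR2 0x2000

        int main(void)
        {
                unsigned index;

                /* Two CRTCs per group; index % 2 picks the register copy. */
                for (index = 0; index < 4; index++)
                        printf("crtc %u -> group %u, ESCR copy at %#x\n",
                               index, index / 2, index % 2 ? ESCR2 : ESCR);
                return 0;
        }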
|
/linux-4.1.27/drivers/usb/gadget/ |
H A D | configfs.h | 16 return container_of(to_config_group(item), struct usb_os_desc, group); to_usb_os_desc()
|
H A D | configfs.c | 45 struct config_group group; member in struct:gadget_info 69 struct config_group group; member in struct:config_usb_cfg 85 struct config_group group; member in struct:gadget_strings 90 struct config_group group; member in struct:os_desc 98 struct config_group group; member in struct:gadget_config_name 312 return container_of(to_config_group(item), struct gadget_info, group); to_gadget_info() 318 group); to_gadget_strings() 325 group); to_gadget_config_name() 331 group); to_config_usb_cfg() 338 struct usb_function_instance, group); to_usb_function_instance() 378 struct config_group *group = to_config_group(usb_func_ci); config_usb_cfg_link() local 379 struct usb_function_instance *fi = container_of(group, config_usb_cfg_link() 380 struct usb_function_instance, group); config_usb_cfg_link() 429 struct config_group *group = to_config_group(usb_func_ci); config_usb_cfg_unlink() local 430 struct usb_function_instance *fi = container_of(group, config_usb_cfg_unlink() 431 struct usb_function_instance, group); config_usb_cfg_unlink() 547 struct config_group *group, function_make() 574 ret = config_item_set_name(&fi->group.cg_item, name); function_make() 587 gi = container_of(group, struct gadget_info, functions_group); function_make() 592 return &fi->group; function_make() 596 struct config_group *group, function_drop() 602 gi = container_of(group, struct gadget_info, functions_group); function_drop() 642 struct config_group *group, config_desc_make() 652 gi = container_of(group, struct gadget_info, configs_group); config_desc_make() 687 cfg->group.default_groups = cfg->default_groups; config_desc_make() 690 config_group_init_type_name(&cfg->group, name, config_desc_make() 699 return &cfg->group; config_desc_make() 707 struct config_group *group, config_desc_drop() 752 return container_of(to_config_group(item), struct os_desc, group); to_os_desc() 762 gi = to_gadget_info(os_desc->group.cg_item.ci_parent); os_desc_use_show() 774 gi = to_gadget_info(os_desc->group.cg_item.ci_parent); os_desc_use_store() 796 gi = to_gadget_info(os_desc->group.cg_item.ci_parent); os_desc_b_vendor_code_show() 808 gi = to_gadget_info(os_desc->group.cg_item.ci_parent); os_desc_b_vendor_code_store() 830 gi = to_gadget_info(os_desc->group.cg_item.ci_parent); os_desc_qw_sign_show() 843 gi = to_gadget_info(os_desc->group.cg_item.ci_parent); os_desc_qw_sign_store() 885 struct config_usb_cfg, group); os_desc_link() 1076 struct config_group *group, ext_prop_make() 1095 desc = container_of(group, struct usb_os_desc, group); ext_prop_make() 1120 static void ext_prop_drop(struct config_group *group, struct config_item *item) ext_prop_drop() argument 1123 struct usb_os_desc *desc = to_usb_os_desc(&group->cg_item); ext_prop_drop() 1256 config_group_init_type_name(&d->group, "", interface_type); usb_os_desc_prepare_interf_dir() 1257 config_item_set_name(&d->group.cg_item, "interface.%s", usb_os_desc_prepare_interf_dir() 1259 interface_groups[n_interf] = &d->group; usb_os_desc_prepare_interf_dir() 1466 struct config_group *group, gadgets_make() 1475 gi->group.default_groups = gi->default_groups; gadgets_make() 1476 gi->group.default_groups[0] = &gi->functions_group; gadgets_make() 1477 gi->group.default_groups[1] = &gi->configs_group; gadgets_make() 1478 gi->group.default_groups[2] = &gi->strings_group; gadgets_make() 1479 gi->group.default_groups[3] = &gi->os_desc_group; gadgets_make() 1519 config_group_init_type_name(&gi->group, name, gadgets_make() 1521 return &gi->group; gadgets_make() 1527 static void 
gadgets_drop(struct config_group *group, struct config_item *item) gadgets_drop() argument 546 function_make( struct config_group *group, const char *name) function_make() argument 595 function_drop( struct config_group *group, struct config_item *item) function_drop() argument 641 config_desc_make( struct config_group *group, const char *name) config_desc_make() argument 706 config_desc_drop( struct config_group *group, struct config_item *item) config_desc_drop() argument 1075 ext_prop_make( struct config_group *group, const char *name) ext_prop_make() argument 1465 gadgets_make( struct config_group *group, const char *name) gadgets_make() argument
|
/linux-4.1.27/include/linux/rtc/ |
H A D | sirfsoc_rtciobrg.h | 5 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
|
/linux-4.1.27/arch/ia64/scripts/ |
H A D | unwcheck.py | 44 func = m.group(1) 45 start = long(m.group(2), 16) 46 end = long(m.group(3), 16) 53 rlen_sum += long(m.group(1))
|
/linux-4.1.27/arch/arm/mach-prima2/ |
H A D | pm.h | 4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
|
H A D | common.h | 4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
|
H A D | hotplug.c | 4 * Copyright (c) 2012 Cambridge Silicon Radio Limited, a CSR plc group company.
|
/linux-4.1.27/net/netlink/ |
H A D | af_netlink.h | 44 int (*netlink_bind)(struct net *net, int group); 45 void (*netlink_unbind)(struct net *net, int group); 80 int (*bind)(struct net *net, int group); 81 void (*unbind)(struct net *net, int group);
|
H A D | genetlink.c | 70 * Bit 0 is marked as already used since group 0 is invalid. 72 * abuses the API and thinks it can statically use group 1. 73 * That group will typically conflict with other groups that 78 * also abused this API and relied on family == group ID, we 79 * cater to that by giving it a static family and group ID. 239 /* special-case our own group and hacks */ genl_validate_assign_mc_groups() 994 static int genl_bind(struct net *net, int group) genl_bind() argument 1003 if (group >= f->mcgrp_offset && list_for_each_entry() 1004 group < f->mcgrp_offset + f->n_mcgrps) { list_for_each_entry() 1005 int fam_grp = group - f->mcgrp_offset; list_for_each_entry() 1022 static void genl_unbind(struct net *net, int group) genl_unbind() argument 1031 if (group >= f->mcgrp_offset && list_for_each_entry() 1032 group < f->mcgrp_offset + f->n_mcgrps) { list_for_each_entry() 1033 int fam_grp = group - f->mcgrp_offset; list_for_each_entry() 1053 /* we'll bump the group number right afterwards */ genl_pernet_init() 1100 static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group, genlmsg_mcast() argument 1115 portid, group, flags); for_each_net_rcu() 1123 return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags); 1130 u32 portid, unsigned int group, gfp_t flags) genlmsg_multicast_allns() 1132 if (WARN_ON_ONCE(group >= family->n_mcgrps)) genlmsg_multicast_allns() 1134 group = family->mcgrp_offset + group; genlmsg_multicast_allns() 1135 return genlmsg_mcast(skb, portid, group, flags); genlmsg_multicast_allns() 1140 struct sk_buff *skb, struct net *net, u32 portid, u32 group, genl_notify() 1149 if (WARN_ON_ONCE(group >= family->n_mcgrps)) genl_notify() 1151 group = family->mcgrp_offset + group; genl_notify() 1152 nlmsg_notify(sk, skb, portid, group, report, flags); genl_notify() 1129 genlmsg_multicast_allns(struct genl_family *family, struct sk_buff *skb, u32 portid, unsigned int group, gfp_t flags) genlmsg_multicast_allns() argument 1139 genl_notify(struct genl_family *family, struct sk_buff *skb, struct net *net, u32 portid, u32 group, struct nlmsghdr *nlh, gfp_t flags) genl_notify() argument
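genl_bind()/genl_unbind() do the reverse of the send-side helpers in genetlink.h: given a global group number, they scan the registered families for the one whose [mcgrp_offset, mcgrp_offset + n_mcgrps) window contains it, then recover the family-relative index. A sketch of that lookup over an illustrative family table:

        #include <stdio.h>

        struct family {
                const char *name;
                unsigned mcgrp_offset;
                unsigned n_mcgrps;
        };

        static const struct family families[] = {   /* illustrative data */
                { "family_a",  1, 1 },
                { "family_b", 16, 2 },
        };

        /* Find the owning family and the family-relative group index. */
        static const struct family *owner_of(unsigned group, unsigned *fam_grp)
        {
                unsigned i;

                for (i = 0; i < sizeof(families) / sizeof(families[0]); i++) {
                        const struct family *f = &families[i];

                        if (group >= f->mcgrp_offset &&
                            group < f->mcgrp_offset + f->n_mcgrps) {
                                *fam_grp = group - f->mcgrp_offset;
                                return f;
                        }
                }
                return NULL;
        }

        int main(void)
        {
                unsigned fam_grp;
                const struct family *f = owner_of(17, &fam_grp);

                if (f)
                        printf("group 17 belongs to %s, index %u\n",
                               f->name, fam_grp);
                return 0;
        }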
|
/linux-4.1.27/include/trace/events/ |
H A D | signal.h | 41 * @group: shared or private 53 int group, int result), 55 TP_ARGS(sig, info, task, group, result), 63 __field( int, group ) 72 __entry->group = group; 78 __entry->comm, __entry->pid, __entry->group,
|
/linux-4.1.27/include/linux/usb/ |
H A D | gadget_configfs.h | 56 struct config_group *group, \ 72 config_group_init_type_name(&new->group, name, \ 75 gi = container_of(group, struct struct_member, strings_group); \ 87 return &new->group; \ 94 struct config_group *group, \
|
/linux-4.1.27/net/mac80211/ |
H A D | rc80211_minstrel_ht.h | 55 /* bitfield of supported MCS rates of this group */ 58 /* sorted rate set within a MCS group*/ 98 /* current MCS group to be sampled */ 104 /* MCS rate group info and statistics */ 124 int minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
|
H A D | rc80211_minstrel_ht.c | 45 * Define group sort order: HT40 -> SGI -> #streams 53 /* MCS rate information for an MCS group */ 265 * Look up an MCS group index based on mac80211 rate information 288 int group, idx; minstrel_ht_get_stats() local 291 group = minstrel_ht_get_group_idx(rate); minstrel_ht_get_stats() 294 group = minstrel_vht_get_group_idx(rate); minstrel_ht_get_stats() 297 group = MINSTREL_CCK_GROUP; minstrel_ht_get_stats() 304 if (!(mi->groups[group].supported & BIT(idx))) minstrel_ht_get_stats() 307 return &mi->groups[group].rates[idx]; minstrel_ht_get_stats() 321 minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate, minstrel_ht_get_tp_avg() argument 330 if (group != MINSTREL_CCK_GROUP) minstrel_ht_get_tp_avg() 333 nsecs += minstrel_mcs_groups[group].duration[rate]; minstrel_ht_get_tp_avg() 388 * Find and set the topmost probability rate per sta and per group 442 * rate (max_tp_rate[0]) is from CCK group. This prohibits such sorted 481 int tmp_max_streams, group, tmp_idx, tmp_prob; minstrel_ht_prob_rate_reduce_streams() local 486 for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { minstrel_ht_prob_rate_reduce_streams() 487 mg = &mi->groups[group]; minstrel_ht_prob_rate_reduce_streams() 488 if (!mg->supported || group == MINSTREL_CCK_GROUP) minstrel_ht_prob_rate_reduce_streams() 492 tmp_prob = mi->groups[group].rates[tmp_idx].prob_ewma; minstrel_ht_prob_rate_reduce_streams() 494 if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) && minstrel_ht_prob_rate_reduce_streams() 495 (minstrel_mcs_groups[group].streams < tmp_max_streams)) { minstrel_ht_prob_rate_reduce_streams() 497 tmp_tp = minstrel_ht_get_tp_avg(mi, group, minstrel_ht_prob_rate_reduce_streams() 518 int group, i, j, cur_prob; minstrel_ht_update_stats() local 539 for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { minstrel_ht_update_stats() 541 mg = &mi->groups[group]; minstrel_ht_update_stats() 547 /* (re)Initialize group rate indexes */ minstrel_ht_update_stats() 549 tmp_group_tp_rate[j] = group; minstrel_ht_update_stats() 555 index = MCS_GROUP_RATES * group + i; minstrel_ht_update_stats() 562 if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0) minstrel_ht_update_stats() 566 if (group != MINSTREL_CCK_GROUP) { minstrel_ht_update_stats() 569 } else if (group == MINSTREL_CCK_GROUP) { minstrel_ht_update_stats() 574 /* Find max throughput rate set within a group */ minstrel_ht_update_stats() 578 /* Find max probability rate per group and global */ minstrel_ht_update_stats() 653 int group, orig_group; minstrel_downgrade_rate() local 655 orig_group = group = *idx / MCS_GROUP_RATES; minstrel_downgrade_rate() 656 while (group > 0) { minstrel_downgrade_rate() 657 group--; minstrel_downgrade_rate() 659 if (!mi->groups[group].supported) minstrel_downgrade_rate() 662 if (minstrel_mcs_groups[group].streams > minstrel_downgrade_rate() 667 *idx = mi->groups[group].max_group_tp_rate[0]; minstrel_downgrade_rate() 669 *idx = mi->groups[group].max_group_tp_rate[1]; minstrel_downgrade_rate() 785 const struct mcs_group *group; minstrel_calc_retransmit() local 804 group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; minstrel_calc_retransmit() 805 tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000; minstrel_calc_retransmit() 843 const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; minstrel_ht_set_rate() local 846 u16 flags = group->flags; minstrel_ht_set_rate() 865 idx = ((group->streams - 1) << 4) | 
minstrel_ht_set_rate() 868 idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8; minstrel_ht_set_rate() 910 const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; minstrel_get_duration() local 911 return group->duration[index % MCS_GROUP_RATES]; minstrel_get_duration()
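The excerpt above leans on one convention worth spelling out: minstrel_ht stores a single flat rate index and recovers the MCS group with index / MCS_GROUP_RATES and the in-group rate with index % MCS_GROUP_RATES (see minstrel_get_duration() and minstrel_ht_set_rate()). A minimal userspace sketch of that arithmetic, using an illustrative MCS_GROUP_RATES value rather than the real constant from rc80211_minstrel_ht.h:

/* Sketch only: demonstrates the index split, not the rate control. */
#include <stdio.h>

#define MCS_GROUP_RATES 8	/* illustrative; the real value lives in the header */

int main(void)
{
	unsigned int index = 19;
	unsigned int group = index / MCS_GROUP_RATES;	/* which MCS group */
	unsigned int rate = index % MCS_GROUP_RATES;	/* rate within it */

	printf("index %u -> group %u, rate %u\n", index, group, rate);
	return 0;	/* prints: index 19 -> group 2, rate 3 */
}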
|
/linux-4.1.27/drivers/s390/block/ |
H A D | dasd_alias.c | 82 /* for hyper pav there is only one group */ _find_group() 88 struct alias_pav_group, group); _find_group() 91 /* for base pav we have to find the group that matches the base */ _find_group() 96 list_for_each_entry(pos, &lcu->grouplist, group) { _find_group() 311 * adding it to a pav group. 320 struct alias_pav_group *group; _add_device_to_lcu() local 344 group = _find_group(lcu, &uid); _add_device_to_lcu() 345 if (!group) { _add_device_to_lcu() 346 group = kzalloc(sizeof(*group), GFP_ATOMIC); _add_device_to_lcu() 347 if (!group) _add_device_to_lcu() 349 memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor)); _add_device_to_lcu() 350 memcpy(group->uid.serial, uid.serial, sizeof(uid.serial)); _add_device_to_lcu() 351 group->uid.ssid = uid.ssid; _add_device_to_lcu() 353 group->uid.base_unit_addr = uid.real_unit_addr; _add_device_to_lcu() 355 group->uid.base_unit_addr = uid.base_unit_addr; _add_device_to_lcu() 356 memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit)); _add_device_to_lcu() 357 INIT_LIST_HEAD(&group->group); _add_device_to_lcu() 358 INIT_LIST_HEAD(&group->baselist); _add_device_to_lcu() 359 INIT_LIST_HEAD(&group->aliaslist); _add_device_to_lcu() 360 list_add(&group->group, &lcu->grouplist); _add_device_to_lcu() 363 list_move(&device->alias_list, &group->baselist); _add_device_to_lcu() 365 list_move(&device->alias_list, &group->aliaslist); _add_device_to_lcu() 366 private->pavgroup = group; _add_device_to_lcu() 374 struct alias_pav_group *group; _remove_device_from_lcu() local 378 group = private->pavgroup; _remove_device_from_lcu() 379 if (!group) _remove_device_from_lcu() 382 if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) { _remove_device_from_lcu() 383 list_del(&group->group); _remove_device_from_lcu() 384 kfree(group); _remove_device_from_lcu() 387 if (group->next == device) _remove_device_from_lcu() 388 group->next = NULL; _remove_device_from_lcu() 486 list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) { _lcu_update() 499 list_del(&pavgroup->group); _lcu_update() 570 struct alias_pav_group *group; _schedule_lcu_update() local 581 group = list_first_entry(&lcu->grouplist, _schedule_lcu_update() 582 struct alias_pav_group, group); _schedule_lcu_update() 583 if (!list_empty(&group->baselist)) _schedule_lcu_update() 584 usedev = list_first_entry(&group->baselist, _schedule_lcu_update() 587 else if (!list_empty(&group->aliaslist)) _schedule_lcu_update() 588 usedev = list_first_entry(&group->aliaslist, _schedule_lcu_update() 666 struct alias_pav_group *group; dasd_alias_get_start_dev() local 672 group = private->pavgroup; dasd_alias_get_start_dev() 674 if (!group || !lcu) dasd_alias_get_start_dev() 691 alias_device = group->next; dasd_alias_get_start_dev() 693 if (list_empty(&group->aliaslist)) { dasd_alias_get_start_dev() 697 alias_device = list_first_entry(&group->aliaslist, dasd_alias_get_start_dev() 702 if (list_is_last(&alias_device->alias_list, &group->aliaslist)) dasd_alias_get_start_dev() 703 group->next = list_first_entry(&group->aliaslist, dasd_alias_get_start_dev() 706 group->next = list_first_entry(&alias_device->alias_list, dasd_alias_get_start_dev() 783 list_for_each_entry(pavgroup, &lcu->grouplist, group) { _restart_all_base_devices_on_lcu() 820 list_for_each_entry(pavgroup, &lcu->grouplist, group) { flush_all_alias_devices_on_lcu() 867 list_for_each_entry(pavgroup, &lcu->grouplist, group) { _stop_all_devices_on_lcu() 893 list_for_each_entry(pavgroup, &lcu->grouplist, group) { 
_unstop_all_devices_on_lcu()
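dasd_alias_get_start_dev() above hands out alias devices round-robin: it remembers group->next, and when the current alias is the last list entry it wraps back to the first. A hedged userspace sketch of that rotation, with a plain array standing in for the kernel's linked lists:

/* Sketch only: the real code walks struct list_head entries. */
#include <stdio.h>

#define NALIAS 3

struct pav_group_sketch {
	int aliases[NALIAS];	/* alias device numbers */
	int next;		/* index of the alias to hand out next */
};

static int get_start_dev(struct pav_group_sketch *g)
{
	int dev = g->aliases[g->next];

	g->next = (g->next + 1) % NALIAS;	/* wrap after the last alias */
	return dev;
}

int main(void)
{
	struct pav_group_sketch g = { { 10, 11, 12 }, 0 };
	int i;

	for (i = 0; i < 5; i++)
		printf("start dev %d\n", get_start_dev(&g));
	return 0;	/* prints 10 11 12 10 11 */
}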
|
/linux-4.1.27/drivers/pinctrl/meson/ |
H A D | pinctrl-meson.h | 20 * struct meson_pmx_group - a pinmux group 22 * @name: group name 23 * @pins: pins in the group 24 * @num_pins: number of pins in the group 25 * @is_gpio: whether the group is a single GPIO group 26 * @reg: register offset for the group in the domain mux registers 27 * @bit: bit index enabling the group 28 * @domain: index of the domain this group belongs to
|
H A D | pinctrl-meson.c | 35 * Every pinmux group can be enabled by a specific bit in the first 186 * @sel_group: index of the selected group, or -1 if none 195 struct meson_pmx_group *group; meson_pmx_disable_other_groups() local 200 group = &pc->data->groups[i]; meson_pmx_disable_other_groups() 201 if (group->is_gpio || i == sel_group) meson_pmx_disable_other_groups() 204 for (j = 0; j < group->num_pins; j++) { meson_pmx_disable_other_groups() 205 if (group->pins[j] == pin) { meson_pmx_disable_other_groups() 206 /* We have found a group using the pin */ meson_pmx_disable_other_groups() 207 domain = &pc->domains[group->domain]; meson_pmx_disable_other_groups() 209 group->reg * 4, meson_pmx_disable_other_groups() 210 BIT(group->bit), 0); meson_pmx_disable_other_groups() 221 struct meson_pmx_group *group = &pc->data->groups[group_num]; meson_pmx_set_mux() local 222 struct meson_domain *domain = &pc->domains[group->domain]; meson_pmx_set_mux() 225 dev_dbg(pc->dev, "enable function %s, group %s\n", func->name, meson_pmx_set_mux() 226 group->name); meson_pmx_set_mux() 230 * The selected group is not disabled to avoid glitches. meson_pmx_set_mux() 232 for (i = 0; i < group->num_pins; i++) meson_pmx_set_mux() 233 meson_pmx_disable_other_groups(pc, group->pins[i], group_num); meson_pmx_set_mux() 237 ret = regmap_update_bits(domain->reg_mux, group->reg * 4, meson_pmx_set_mux() 238 BIT(group->bit), BIT(group->bit)); meson_pmx_set_mux() 424 struct meson_pmx_group *group = &pc->data->groups[num_group]; meson_pinconf_group_set() local 427 dev_dbg(pc->dev, "set pinconf for group %s\n", group->name); meson_pinconf_group_set() 429 for (i = 0; i < group->num_pins; i++) { meson_pinconf_group_set() 430 meson_pinconf_set(pcdev, group->pins[i], configs, meson_pinconf_group_set() 438 unsigned int group, unsigned long *config) meson_pinconf_group_get() 437 meson_pinconf_group_get(struct pinctrl_dev *pcdev, unsigned int group, unsigned long *config) meson_pinconf_group_get() argument
|
/linux-4.1.27/drivers/staging/lustre/include/linux/lnet/ |
H A D | lnetst.h | 60 #define LSTIO_GROUP_ADD 0xC10 /* add group */ 62 #define LSTIO_GROUP_INFO 0xC12 /* query default information of specified group */ 63 #define LSTIO_GROUP_DEL 0xC13 /* delete group */ 64 #define LSTIO_NODES_ADD 0xC14 /* add nodes to specified group */ 65 #define LSTIO_GROUP_UPDATE 0xC15 /* update group */ 85 } lst_bid_t; /*** batch id (group of tests) */ 119 lstcon_ndlist_ent_t tbe_cli_nle; /* client (group) node_list entry */ 120 lstcon_ndlist_ent_t tbe_srv_nle; /* server (group) node_list entry */ 275 int lstio_dbg_type; /* IN: debug session|batch|group|nodes list */ 280 char *lstio_dbg_namep; /* IN: name of group|batch */ 289 char *lstio_grp_namep; /* IN: group name */ 295 char *lstio_grp_namep; /* IN: group name */ 298 #define LST_GROUP_CLEAN 1 /* remove inactive nodes in the group */ 299 #define LST_GROUP_REFRESH 2 /* refresh inactive nodes in the group */ 300 #define LST_GROUP_RMND 3 /* delete nodes from the group */ 307 char *lstio_grp_namep; /* IN: group name */ 316 char *lstio_grp_namep; /* IN: group name */ 326 int lstio_grp_idx; /* IN: group idx */ 335 lstcon_ndlist_ent_t *lstio_grp_entp; /* OUT: description of group */ 406 int lstio_sta_nmlen; /* IN: group name length */ 407 char *lstio_sta_namep; /* IN: group name */ 432 int lstio_tes_sgrp_nmlen; /* IN: source group name length */ 433 char *lstio_tes_sgrp_name; /* IN: group name */ 434 int lstio_tes_dgrp_nmlen; /* IN: destination group name length */ 435 char *lstio_tes_dgrp_name; /* IN: group name */
|
/linux-4.1.27/drivers/staging/gdm724x/ |
H A D | netlink_k.c | 115 int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) netlink_send() argument 122 if (group > ND_MAX_GROUP) netlink_send() 125 if (!netlink_has_listeners(sock, group+1)) netlink_send() 139 ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC); netlink_send() 145 group, type, len, ret); netlink_send() 146 else if (netlink_has_listeners(sock, group+1)) netlink_send()
|
/linux-4.1.27/drivers/staging/gdm72xx/ |
H A D | netlink_k.c | 116 int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) netlink_send() argument 123 if (group > ND_MAX_GROUP) { netlink_send() 124 pr_err("Group %d is invalid.\n", group); netlink_send() 125 pr_err("Valid group is 0 ~ %d.\n", ND_MAX_GROUP); netlink_send() 146 ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC); netlink_send() 152 group, type, len, ret); netlink_send()
|
/linux-4.1.27/arch/um/drivers/ |
H A D | vde.h | 21 char *group; member in struct:vde_init
|
H A D | vde_kern.c | 6 * ethN=vde,<vde_switch>,<mac addr>,<port>,<group>,<mode>,<description> 79 .group = NULL, vde_setup() 83 &init->group, &mode_str, &init->descr, NULL); vde_setup()
|
/linux-4.1.27/include/rdma/ |
H A D | rdma_netlink.h | 70 * Send the supplied skb to a netlink group. 73 * @group: Netlink group ID 78 unsigned int group, gfp_t flags);
|
H A D | ib_sa.h | 332 * group. 334 * @device: Device associated with the multicast group. 336 * group. 337 * @rec: SA multicast member record specifying group attributes. 338 * @comp_mask: Component mask indicating which group attributes of %rec are 345 * multicast group. If the join operation is started successfully, it returns 354 * -EINVAL: The MCMemberRecord values differed from the existing group's. 356 * group, and the user must rejoin the group to continue using it. 369 * any reference on the multicast group. 382 * @device: Device associated with the multicast group. 384 * group. 385 * @mgid: MGID of multicast group.
|
/linux-4.1.27/include/media/ |
H A D | sh_mobile_ceu.h | 26 unsigned int *asd_sizes; /* 0-terminated array of asd group sizes */
|
/linux-4.1.27/include/uapi/linux/ |
H A D | wait.h | 11 #define __WNOTHREAD 0x20000000 /* Don't wait on children of other threads in this group */
|
H A D | efs_fs_sb.h | 28 __be32 fs_cgfsize; /* size of cylinder group in bb's */ 29 __be16 fs_cgisize; /* bb's of inodes per cylinder group */ 54 __u32 group_size; /* # of blocks a group consists of */
|
H A D | igmp.h | 34 __be32 group; member in struct:igmphdr 37 /* V3 group record types [grec_type] */ 66 __be32 group; member in struct:igmpv3_query
|
H A D | limits.h | 6 #define NGROUPS_MAX 65536 /* supplemental group IDs are available */
|
H A D | net_dropmon.h | 61 * Our group identifiers
|
H A D | netdevice.h | 36 /* Initial net device group. All devices belong to group 0 by default. */
|
H A D | dqblk_xfs.h | 30 #define XQM_GRPQUOTA 1 /* system call group quota type */ 46 * This contains the current quota information regarding a user/proj/group. 55 __u32 d_id; /* user, project, or group ID */ 127 #define FS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */ 128 #define FS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */ 134 #define FS_GROUP_QUOTA (1<<2) /* group quota type */ 139 * eg. space taken up for user and group quotas, number of dquots currently 158 fs_qfilestat_t qs_gquota; /* group quota storage information */ 170 * space taken up for user, group, and project quotas, number of dquots 203 struct fs_qfilestatv qs_gquota; /* group quota information */
|
/linux-4.1.27/arch/mips/loongson/loongson-3/ |
H A D | smp.h | 13 /* 4 cores in each group (node) */
|
/linux-4.1.27/arch/arm/include/debug/ |
H A D | sirf.S | 4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
|
/linux-4.1.27/arch/blackfin/kernel/ |
H A D | gptimers.c | 218 uint32_t get_gptimer_status(unsigned int group) get_gptimer_status() argument 220 tassert(group < BFIN_TIMER_NUM_GROUP); get_gptimer_status() 221 return bfin_read(&group_regs[group]->data_ilat); get_gptimer_status() 225 void set_gptimer_status(unsigned int group, uint32_t value) set_gptimer_status() argument 227 tassert(group < BFIN_TIMER_NUM_GROUP); set_gptimer_status() 228 bfin_write(&group_regs[group]->data_ilat, value); set_gptimer_status() 233 uint32_t get_gptimer_status(unsigned int group) get_gptimer_status() argument 235 tassert(group < BFIN_TIMER_NUM_GROUP); get_gptimer_status() 236 return bfin_read(&group_regs[group]->status); get_gptimer_status() 240 void set_gptimer_status(unsigned int group, uint32_t value) set_gptimer_status() argument 242 tassert(group < BFIN_TIMER_NUM_GROUP); set_gptimer_status() 243 bfin_write(&group_regs[group]->status, value); set_gptimer_status()
|
/linux-4.1.27/net/netfilter/ |
H A D | xt_devgroup.c | 18 MODULE_DESCRIPTION("Xtables: Device group match"); 27 (((info->src_group ^ par->in->group) & info->src_mask ? 1 : 0) ^ devgroup_mt() 32 (((info->dst_group ^ par->out->group) & info->dst_mask ? 1 : 0) ^ devgroup_mt()
|
H A D | xt_NFLOG.c | 33 li.u.ulog.group = info->group; nflog_tg()
|
H A D | xt_cgroup.c | 2 * Xtables module to match the process control group. 23 MODULE_DESCRIPTION("Xtables: process control group matching");
|
/linux-4.1.27/tools/perf/ |
H A D | builtin-evlist.c | 49 OPT_BOOLEAN('g', "group", &details.event_group, cmd_evlist() 50 "Show event group information"), cmd_evlist() 64 pr_err("--group option is not compatible with other options\n"); cmd_evlist()
|
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8192ce/ |
H A D | hw.h | 35 u8 group; rtl92c_get_chnl_group() local 38 group = 0; rtl92c_get_chnl_group() 40 group = 1; rtl92c_get_chnl_group() 42 group = 2; rtl92c_get_chnl_group() 43 return group; rtl92c_get_chnl_group()
|
/linux-4.1.27/arch/mips/include/asm/ |
H A D | bootinfo.h | 25 * Valid machtype values for group unknown 30 * Valid machtype for group DEC 45 * Valid machtype for group PMC-MSP 56 * Valid machtype for group Mikrotik 78 * Valid machtype for group INGENIC
|
/linux-4.1.27/net/decnet/netfilter/ |
H A D | dn_rtmsg.c | 68 int group = 0; dnrmg_send_peer() local 73 group = DNRNG_NLGRP_L1; dnrmg_send_peer() 76 group = DNRNG_NLGRP_L2; dnrmg_send_peer() 85 NETLINK_CB(skb2).dst_group = group; dnrmg_send_peer() 86 netlink_broadcast(dnrmg, skb2, 0, group, GFP_ATOMIC); dnrmg_send_peer()
|
/linux-4.1.27/arch/x86/kernel/cpu/ |
H A D | perf_event_intel_cqm.c | 37 * Groups of events that have the same target(s), one RMID per group. 264 * Events that target same task are placed into the same cache group. __match_event() 382 * Exchange the RMID of a group of events. 385 intel_cqm_xchg_rmid(struct perf_event *group, unsigned int rmid) intel_cqm_xchg_rmid() argument 388 unsigned int old_rmid = group->hw.cqm_rmid; intel_cqm_xchg_rmid() 389 struct list_head *head = &group->hw.cqm_group_entry; intel_cqm_xchg_rmid() 404 local64_set(&group->count, atomic64_read(&rr.value)); intel_cqm_xchg_rmid() 409 group->hw.cqm_rmid = rmid; intel_cqm_xchg_rmid() 451 * If we have group events waiting for an RMID that don't conflict with 595 * Pick a victim group and move it to the tail of the group list. 596 * @next: The first group without an RMID 609 * The group at the front of the list should always have a valid __intel_cqm_pick_and_rotate() 624 * place them on the back of the group list. 628 struct perf_event *group, *g; intel_cqm_sched_out_conflicting_events() local 633 list_for_each_entry_safe(group, g, &cache_groups, hw.cqm_groups_entry) { intel_cqm_sched_out_conflicting_events() 634 if (group == event) intel_cqm_sched_out_conflicting_events() 637 rmid = group->hw.cqm_rmid; intel_cqm_sched_out_conflicting_events() 648 if (!__conflict_event(group, event)) intel_cqm_sched_out_conflicting_events() 651 intel_cqm_xchg_rmid(group, INVALID_RMID); intel_cqm_sched_out_conflicting_events() 671 * Rotation works by taking away an RMID from a group (the old RMID), 672 * and assigning the free RMID to another group (the new RMID). We must 682 struct perf_event *group, *start = NULL; __intel_cqm_rmid_rotate() local 698 list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) { __intel_cqm_rmid_rotate() 699 if (!__rmid_valid(group->hw.cqm_rmid)) { __intel_cqm_rmid_rotate() 701 start = group; __intel_cqm_rmid_rotate() 721 * We force deallocate the rmid of the group at the head of __intel_cqm_rmid_rotate() 722 * cache_groups. The first event group without an RMID then gets __intel_cqm_rmid_rotate() 823 * Find a group and setup RMID. 825 * If we're part of a group, we use the group's RMID. 828 struct perf_event **group) intel_cqm_setup_event() 838 /* All tasks in a group share an RMID */ intel_cqm_setup_event() 840 *group = iter; intel_cqm_setup_event() 925 * Only the group leader gets to report values. This stops us intel_cqm_event_count() 1047 * If there's another event in this group... intel_cqm_event_destroy() 1057 * And we're the group leader.. intel_cqm_event_destroy() 1062 * destroy the group and return the RMID. intel_cqm_event_destroy() 1081 struct perf_event *group = NULL; intel_cqm_event_init() local 1108 intel_cqm_setup_event(event, &group); intel_cqm_event_init() 1110 if (group) { intel_cqm_event_init() 1112 &group->hw.cqm_group_entry); intel_cqm_event_init() 1121 * We only do this for the group leader, rather than for intel_cqm_event_init() 1122 * every event in a group to save on needless work. intel_cqm_event_init() 827 intel_cqm_setup_event(struct perf_event *event, struct perf_event **group) intel_cqm_setup_event() argument
|
/linux-4.1.27/include/linux/pinctrl/ |
H A D | machine.h | 29 * @group: the name of the group whose mux function is to be configured. This 30 * field may be left NULL, and the first applicable group for the function 32 * @function: the mux function to select for the group 35 const char *group; member in struct:pinctrl_map_mux 41 * @group_or_pin: the name of the pin or group whose configuration parameters 95 .group = grp, \
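The PIN_MAP_MUX_GROUP macro body excerpted above (".group = grp") is filled in from a board-file mapping table. A hedged sketch of such a table; the device, pin controller, group, and function names here are hypothetical, while the macro and registration call are the real API:

/* Hedged sketch of a board mapping; all names below are made up. */
#include <linux/pinctrl/machine.h>

static const struct pinctrl_map board_pin_map[] = {
	/* select function "mmc0" on group "mmc0_grp" of controller
	 * "pinctrl.0" whenever device "mmc0" asks for its default state */
	PIN_MAP_MUX_GROUP("mmc0", PINCTRL_STATE_DEFAULT,
			  "pinctrl.0", "mmc0_grp", "mmc0"),
};

A board init path would install this with pinctrl_register_mappings(board_pin_map, ARRAY_SIZE(board_pin_map)); leaving the group NULL, as the header notes, lets the core pick the first applicable group for the function.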
|
H A D | pinconf.h | 31 * @pin_config_group_get: get configurations for an entire pin group 32 * @pin_config_group_set: configure all pins in a group 37 * per-device info for a certain group in debugfs
|
/linux-4.1.27/Documentation/connector/ |
H A D | cn_test.c | 60 u32 group = 1; 92 ctl->group = group; 117 NETLINK_CB(skb).dst_group = ctl->group; 118 //netlink_broadcast(nls, skb, 0, ctl->group, GFP_ATOMIC); 121 pr_info("request was sent: group=0x%x\n", ctl->group);
|
/linux-4.1.27/kernel/sched/ |
H A D | cpuacct.c | 20 /* Time spent by the tasks of the cpu accounting group executing in ... */ 28 /* track cpu usage of a group of tasks and its child groups */ 41 /* return cpu accounting group to which this task belongs */ task_ca() 58 /* create a new cpu accounting group */ 89 /* destroy an existing cpu accounting group */ cpuacct_css_free() 134 /* return total cpu usage (in nanoseconds) of a group */ cpuusage_read() 231 * charge this task's execution time to its accounting group.
|
H A D | stats.h | 186 * In order to keep a consistent behaviour between thread group cputime cputimer_running() 187 * and thread group cputimer accounting, let's also ignore the cputime cputimer_running() 188 * elapsing after __exit_signal() in any thread group timer running. cputimer_running() 201 * account_group_user_time - Maintain utime for a thread group. 207 * If thread group time is being maintained, get the structure for the 224 * account_group_system_time - Maintain stime for a thread group. 230 * If thread group time is being maintained, get the structure for the 247 * account_group_exec_runtime - Maintain exec runtime for a thread group. 253 * If thread group time is being maintained, get the structure for the
|
/linux-4.1.27/Documentation/filesystems/configfs/ |
H A D | configfs_example_explicit.c | 270 struct config_group group; member in struct:simple_children 275 return item ? container_of(to_config_group(item), struct simple_children, group) : NULL; to_simple_children() 278 static struct config_item *simple_children_make_item(struct config_group *group, const char *name) simple_children_make_item() argument 354 * 03-group-children 356 * This example reuses the simple_children group from above. However, 357 * the simple_children group is not the subsystem itself, it is a 358 * child of the subsystem. Creation of a group in the subsystem creates 359 * a new simple_children group. That group can then have simple_child 363 static struct config_group *group_children_make_group(struct config_group *group, const char *name) group_children_make_group() argument 372 config_group_init_type_name(&simple_children->group, name, group_children_make_group() 375 return &simple_children->group; group_children_make_group() 394 "[03-group-children]\n" group_children_attr_show() 422 .ci_namebuf = "03-group-children",
|
H A D | configfs_example_macros.c | 233 struct config_group group; member in struct:simple_children 238 return item ? container_of(to_config_group(item), struct simple_children, group) : NULL; to_simple_children() 241 static struct config_item *simple_children_make_item(struct config_group *group, const char *name) simple_children_make_item() argument 317 * 03-group-children 319 * This example reuses the simple_children group from above. However, 320 * the simple_children group is not the subsystem itself, it is a 321 * child of the subsystem. Creation of a group in the subsystem creates 322 * a new simple_children group. That group can then have simple_child 326 static struct config_group *group_children_make_group(struct config_group *group, const char *name) group_children_make_group() argument 335 config_group_init_type_name(&simple_children->group, name, group_children_make_group() 338 return &simple_children->group; group_children_make_group() 357 "[03-group-children]\n" group_children_attr_show() 385 .ci_namebuf = "03-group-children",
|
/linux-4.1.27/drivers/connector/ |
H A D | connector.c | 70 * The message is sent to the portid if given, the group if given, both if 71 * both, or if both are zero then the group is looked up and sent there. 82 u32 group = 0; cn_netlink_send_mult() local 86 group = __group; cn_netlink_send_mult() 93 group = __cbq->group; cn_netlink_send_mult() 103 if (!portid && !netlink_has_listeners(dev->nls, group)) cn_netlink_send_mult() 122 NETLINK_CB(skb).dst_group = group; cn_netlink_send_mult() 124 if (group) cn_netlink_send_mult() 125 return netlink_broadcast(dev->nls, skb, portid, group, cn_netlink_send_mult()
|
/linux-4.1.27/arch/x86/tools/ |
H A D | gen-insn-attr-x86.awk | 21 gid = -1 # group id 150 if (!($2 in group)) 151 semantic_error("No group: " $2 ) 152 gid = group[$2] 169 # print group tables 301 # check if group opcode 303 if (!(opcode in group)) { 304 group[opcode] = ggid 307 flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")") 368 # print group opcode map's array
|
/linux-4.1.27/fs/ufs/ |
H A D | cylinder.c | 25 * Read cylinder group into cache. The memory space for ufs_cg_private_info 45 * We already have the first fragment of cylinder group block in buffer ufs_read_cylinder() 75 ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno); ufs_read_cylinder() 79 * Remove cylinder group from cache, doesn't release memory 80 * allocated for cylinder group (this is done at ufs_put_super only). 121 * Find cylinder group in cache and return it as a pointer. 122 * If cylinder group is not in cache, we will load it from disk. 142 * Cylinder group number cg is in cache and it was last used ufs_load_cylinder() 169 * Cylinder group number cg is in cache but it was not last used, ufs_load_cylinder() 183 * Cylinder group number cg is not in cache, we will read it from disk ufs_load_cylinder()
|
H A D | ufs_fs.h | 60 * Each cylinder group has inodes and data. 64 * data and is replicated in each cylinder group to protect against 72 * [fs->fs_cblkno] Cylinder group block 75 * The beginning of cylinder group cg in fs is given by 173 /* cylinder group encoding */ 211 * Cylinder group macros to locate things in cylinder groups. 212 * They calculate file system addresses of cylinder group data structures. 225 * inode number to cylinder group number. 358 __fs32 fs_cgoffset; /* cylinder group offset in cylinder */ 378 __fs32 fs_maxbpg; /* max number of blks per cyl group */ 409 __fs32 fs_cgsize; /* cylinder group size */ 417 __fs32 fs_cpg; /* cylinders per group */ 418 __fs32 fs_ipg; /* inodes per cylinder group */ 419 __fs32 fs_fpg; /* blocks per group * fs_frag */ 510 * Convert cylinder group to base address of its global summary info. 515 * Cylinder group block for a file system. 517 * Writable fields in the cylinder group are protected by the associated 524 * Macros for access to old cylinder group array structures 540 __fs32 cg_cgx; /* we are the cgx'th cylinder group */ 573 __u8 cg_space[1]; /* space for cylinder group maps */ 577 /* Historic Cylinder group info */ 582 __fs32 cg_cgx; /* we are the cgx'th cylinder group */ 636 __fs32 ui_gid; /* 0x74 File group */ 654 __fs32 ui_gid; /* 8: File group. */ 709 __u32 c_cgx; /* number of cylinder group */ 734 __u32 s_cgoffset; /* cylinder group offset in cylinder */ 762 __u32 s_cgsize; /* cylinder group size */ 766 __u32 s_ipg; /* inodes per cylinder group */ 767 __u32 s_fpg; /* fragments per group */
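The fields excerpted above carry the arithmetic the comments mention: with fs_ipg inodes per cylinder group, an inode's group is plain integer division. A minimal sketch of that mapping, with illustrative values rather than real superblock contents:

/* Sketch of the inode -> cylinder group mapping the header describes. */
#include <stdio.h>

struct ufs_sb_sketch {
	unsigned int fs_ipg;	/* inodes per cylinder group */
};

static unsigned int ino_to_cg(const struct ufs_sb_sketch *sb, unsigned int ino)
{
	return ino / sb->fs_ipg;
}

int main(void)
{
	struct ufs_sb_sketch sb = { .fs_ipg = 1920 };

	printf("inode %u -> cylinder group %u\n", 5000u, ino_to_cg(&sb, 5000u));
	return 0;	/* 5000 / 1920 = group 2 */
}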
|
/linux-4.1.27/drivers/staging/rtl8723au/hal/ |
H A D | rtl8723a_hal_init.c | 1527 u32 rfPath, eeAddr, group, rfPathMax = 1; Hal_ReadPowerValueFromPROM_8723A() local 1532 for (group = 0; group < MAX_CHNL_GROUP; group++) { Hal_ReadPowerValueFromPROM_8723A() 1534 pwrInfo->CCKIndex[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1536 pwrInfo->HT40_1SIndex[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1538 pwrInfo->HT40_2SIndexDiff[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1540 pwrInfo->HT20IndexDiff[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1542 pwrInfo->OFDMIndexDiff[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1544 pwrInfo->HT40MaxOffset[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1546 pwrInfo->HT20MaxOffset[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1555 for (group = 0; group < MAX_CHNL_GROUP; group++) { Hal_ReadPowerValueFromPROM_8723A() 1557 EEPROM_CCK_TX_PWR_INX_8723A + (rfPath * 3) + group; Hal_ReadPowerValueFromPROM_8723A() 1558 /* pwrInfo->CCKIndex[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1561 &pwrInfo->CCKIndex[rfPath][group]); Hal_ReadPowerValueFromPROM_8723A() 1563 (rfPath * 3) + group; Hal_ReadPowerValueFromPROM_8723A() 1564 /* pwrInfo->HT40_1SIndex[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1567 &pwrInfo->HT40_1SIndex[rfPath][group]); Hal_ReadPowerValueFromPROM_8723A() 1571 for (group = 0; group < MAX_CHNL_GROUP; group++) { Hal_ReadPowerValueFromPROM_8723A() 1573 pwrInfo->HT40_2SIndexDiff[rfPath][group] = 0; Hal_ReadPowerValueFromPROM_8723A() 1574 pwrInfo->HT20IndexDiff[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1577 group] >> (rfPath * 4)) & 0xF; Hal_ReadPowerValueFromPROM_8723A() 1579 if (pwrInfo->HT20IndexDiff[rfPath][group] & BIT(3)) Hal_ReadPowerValueFromPROM_8723A() 1580 pwrInfo->HT20IndexDiff[rfPath][group] |= 0xF0; Hal_ReadPowerValueFromPROM_8723A() 1582 pwrInfo->OFDMIndexDiff[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1584 group] >> (rfPath * 4)) & 0xF; Hal_ReadPowerValueFromPROM_8723A() 1586 pwrInfo->HT40MaxOffset[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1588 group] >> (rfPath * 4)) & 0xF; Hal_ReadPowerValueFromPROM_8723A() 1590 pwrInfo->HT20MaxOffset[rfPath][group] = Hal_ReadPowerValueFromPROM_8723A() 1592 group] >> (rfPath * 4)) & 0xF; Hal_ReadPowerValueFromPROM_8723A() 1601 u8 group = 0; Hal_GetChnlGroup() local 1604 group = 0; Hal_GetChnlGroup() 1606 group = 1; Hal_GetChnlGroup() 1608 group = 2; Hal_GetChnlGroup() 1610 return group; Hal_GetChnlGroup() 1619 u8 rfPath, ch, group, rfPathMax = 1; Hal_EfuseParsetxpowerinfo_8723A() local 1625 group = Hal_GetChnlGroup(ch); Hal_EfuseParsetxpowerinfo_8723A() 1628 pwrInfo.CCKIndex[rfPath][group]; Hal_EfuseParsetxpowerinfo_8723A() 1630 pwrInfo.HT40_1SIndex[rfPath][group]; Hal_EfuseParsetxpowerinfo_8723A() 1633 pwrInfo.HT20IndexDiff[rfPath][group]; Hal_EfuseParsetxpowerinfo_8723A() 1635 pwrInfo.OFDMIndexDiff[rfPath][group]; Hal_EfuseParsetxpowerinfo_8723A() 1637 pwrInfo.HT20MaxOffset[rfPath][group]; Hal_EfuseParsetxpowerinfo_8723A() 1639 pwrInfo.HT40MaxOffset[rfPath][group]; Hal_EfuseParsetxpowerinfo_8723A() 1641 pwr = pwrInfo.HT40_1SIndex[rfPath][group]; Hal_EfuseParsetxpowerinfo_8723A() 1642 diff = pwrInfo.HT40_2SIndexDiff[rfPath][group]; Hal_EfuseParsetxpowerinfo_8723A()
|
/linux-4.1.27/drivers/staging/lustre/include/linux/libcfs/ |
H A D | libcfs_kernelcomm.h | 83 * messages. Multiple transports may be used within a group, or multiple 86 * use group 0 to signify unicast. 93 int libcfs_kkuc_group_put(int group, void *payload); 94 int libcfs_kkuc_group_add(struct file *fp, int uid, int group, 96 int libcfs_kkuc_group_rem(int uid, int group); 97 int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
|
/linux-4.1.27/drivers/pinctrl/qcom/ |
H A D | pinctrl-msm.h | 36 * this group. The index of the selected function is used 40 * @ctl_reg: Offset of the register holding control bits for this group. 41 * @io_reg: Offset of the register holding input/output bits for this group. 43 * @intr_status_reg: Offset of the register holding the status bits for this group. 45 * from this group. 52 * @intr_enable_bit: Offset in @intr_cfg_reg for enabling the interrupt for this group.
|
/linux-4.1.27/fs/dlm/ |
H A D | config.c | 95 struct config_group group; member in struct:dlm_cluster 313 struct config_group group; member in struct:dlm_space 432 return i ? container_of(to_config_group(i), struct dlm_cluster, group) : config_item_to_cluster() 438 return i ? container_of(to_config_group(i), struct dlm_space, group) : config_item_to_space() 468 config_group_init_type_name(&cl->group, name, &cluster_type); make_cluster() 472 cl->group.default_groups = gps; make_cluster() 473 cl->group.default_groups[0] = &sps->ss_group; make_cluster() 474 cl->group.default_groups[1] = &cms->cs_group; make_cluster() 475 cl->group.default_groups[2] = NULL; make_cluster() 494 return &cl->group; make_cluster() 510 for (j = 0; cl->group.default_groups[j]; j++) { drop_cluster() 511 tmp = &cl->group.default_groups[j]->cg_item; drop_cluster() 512 cl->group.default_groups[j] = NULL; drop_cluster() 525 kfree(cl->group.default_groups); release_cluster() 542 config_group_init_type_name(&sp->group, name, &space_type); make_space() 545 sp->group.default_groups = gps; make_space() 546 sp->group.default_groups[0] = &nds->ns_group; make_space() 547 sp->group.default_groups[1] = NULL; make_space() 552 return &sp->group; make_space() 569 for (j = 0; sp->group.default_groups[j]; j++) { drop_space() 570 tmp = &sp->group.default_groups[j]->cg_item; drop_space() 571 sp->group.default_groups[j] = NULL; drop_space() 581 kfree(sp->group.default_groups); release_space() 901 config_item_put(&sp->group.cg_item); put_space()
|
/linux-4.1.27/scripts/ |
H A D | link-vmlinux.sh | 45 --start-group ${KBUILD_VMLINUX_MAIN} --end-group 58 --start-group ${KBUILD_VMLINUX_MAIN} --end-group ${1} 62 -Wl,--start-group \ 64 -Wl,--end-group \
|
/linux-4.1.27/drivers/iio/ |
H A D | industrialio-event.c | 34 * @group: event interface sysfs attribute group 42 struct attribute_group group; member in struct:iio_event_interface 475 indio_dev->event_interface->group.name = iio_event_group_name; iio_device_register_eventset() 476 indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1, iio_device_register_eventset() 477 sizeof(indio_dev->event_interface->group.attrs[0]), iio_device_register_eventset() 479 if (indio_dev->event_interface->group.attrs == NULL) { iio_device_register_eventset() 484 memcpy(indio_dev->event_interface->group.attrs, iio_device_register_eventset() 486 sizeof(indio_dev->event_interface->group.attrs[0]) iio_device_register_eventset() 493 indio_dev->event_interface->group.attrs[attrn++] = iio_device_register_eventset() 496 &indio_dev->event_interface->group; iio_device_register_eventset() 526 kfree(indio_dev->event_interface->group.attrs); iio_device_unregister_eventset()
|
/linux-4.1.27/include/linux/netfilter/ |
H A D | nfnetlink.h | 36 int nfnetlink_has_listeners(struct net *net, unsigned int group); 40 unsigned int group, int echo, gfp_t flags); 41 int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
|
/linux-4.1.27/drivers/mcb/ |
H A D | mcb-internal.h | 60 * @group: the group the device belongs to (0 = no group) 109 unsigned int group:6; member in struct:chameleon_bdd
|
/linux-4.1.27/drivers/mmc/core/ |
H A D | sd_ops.h | 20 int mmc_sd_switch(struct mmc_card *card, int mode, int group,
|
/linux-4.1.27/drivers/pinctrl/mediatek/ |
H A D | pinctrl-mtk-common.h | 78 * struct mtk_drv_group_desc - Provide driving group data. 79 * @max_drv: The maximum current of this group. 80 * @min_drv: The minimum current of this group. 81 * @low_bit: The lowest bit of this group. 82 * @high_bit: The highest bit of this group. 83 * @step: The step current of this group. 107 * @grp: The group this pin belongs to. 153 * @grp_desc: The driving group info. 154 * @pin_drv_grp: The driving group for all pins.
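Taken together, min_drv, step, and low_bit suggest the usual drive-strength encoding: step up from the minimum current and shift the result into the register field. The sketch below is an assumption for illustration only, not the driver's verified formula:

/* Assumed encoding: field = (mA - min_drv) / step, placed at low_bit. */
#include <stdio.h>

struct drv_group_sketch {
	unsigned int min_drv;	/* current at field value 0, in mA */
	unsigned int step;	/* mA per field increment */
	unsigned int low_bit;	/* lowest bit of the field */
};

static unsigned int drv_to_regbits(const struct drv_group_sketch *g,
				   unsigned int ma)
{
	return ((ma - g->min_drv) / g->step) << g->low_bit;
}

int main(void)
{
	struct drv_group_sketch g = { .min_drv = 2, .step = 2, .low_bit = 4 };

	printf("8mA -> reg bits 0x%x\n", drv_to_regbits(&g, 8));	/* 0x30 */
	return 0;
}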
|
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ |
H A D | M0203.h | 21 u8 group; member in struct:nvbios_M0203E
|
/linux-4.1.27/drivers/vme/boards/ |
H A D | vme_vmivme7805.h | 6 * Author: Arthur Benilov <arthur.benilov@iba-group.com>
|
/linux-4.1.27/security/keys/encrypted-keys/ |
H A D | ecryptfs_format.h | 6 * TORSEC group -- http://security.polito.it
|
/linux-4.1.27/drivers/pinctrl/sunxi/ |
H A D | pinctrl-sunxi.c | 39 sunxi_pinctrl_find_group_by_name(struct sunxi_pinctrl *pctl, const char *group) sunxi_pinctrl_find_group_by_name() argument 46 if (!strcmp(grp->name, group)) sunxi_pinctrl_find_group_by_name() 129 unsigned group) sunxi_pctrl_get_group_name() 133 return pctl->groups[group].name; sunxi_pctrl_get_group_name() 137 unsigned group, sunxi_pctrl_get_group_pins() 143 *pins = (unsigned *)&pctl->groups[group].pin; sunxi_pctrl_get_group_pins() 158 const char *group; sunxi_pctrl_dt_node_to_map() local 185 of_property_for_each_string(node, "allwinner,pins", prop, group) { sunxi_pctrl_dt_node_to_map() 187 sunxi_pinctrl_find_group_by_name(pctl, group); sunxi_pctrl_dt_node_to_map() 191 dev_err(pctl->dev, "unknown pin %s", group); sunxi_pctrl_dt_node_to_map() 199 function, group); sunxi_pctrl_dt_node_to_map() 204 (*map)[i].data.mux.group = group; sunxi_pctrl_dt_node_to_map() 210 (*map)[i].data.configs.group_or_pin = group; sunxi_pctrl_dt_node_to_map() 273 unsigned group, sunxi_pconf_group_get() 278 *config = pctl->groups[group].config; sunxi_pconf_group_get() 284 unsigned group, sunxi_pconf_group_set() 289 struct sunxi_pinctrl_group *g = &pctl->groups[group]; sunxi_pconf_group_set() 399 unsigned group) sunxi_pmx_set_mux() 402 struct sunxi_pinctrl_group *g = pctl->groups + group; sunxi_pmx_set_mux() 779 struct sunxi_pinctrl_group *group = pctl->groups + i; sunxi_pinctrl_build_state() local 781 group->name = pin->pin.name; sunxi_pinctrl_build_state() 782 group->pin = pin->pin.number; sunxi_pinctrl_build_state() 128 sunxi_pctrl_get_group_name(struct pinctrl_dev *pctldev, unsigned group) sunxi_pctrl_get_group_name() argument 136 sunxi_pctrl_get_group_pins(struct pinctrl_dev *pctldev, unsigned group, const unsigned **pins, unsigned *num_pins) sunxi_pctrl_get_group_pins() argument 272 sunxi_pconf_group_get(struct pinctrl_dev *pctldev, unsigned group, unsigned long *config) sunxi_pconf_group_get() argument 283 sunxi_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group, unsigned long *configs, unsigned num_configs) sunxi_pconf_group_set() argument 397 sunxi_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned function, unsigned group) sunxi_pmx_set_mux() argument
|
/linux-4.1.27/drivers/staging/rtl8188eu/hal/ |
H A D | rtl8188e_hal_init.c | 355 u32 rfPath, eeAddr = EEPROM_TX_PWR_INX_88E, group, TxCount = 0; Hal_ReadPowerValueFromPROM_8188E() local 362 for (group = 0; group < MAX_CHNL_GROUP_24G; group++) { Hal_ReadPowerValueFromPROM_8188E() 363 pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX; Hal_ReadPowerValueFromPROM_8188E() 364 pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX; Hal_ReadPowerValueFromPROM_8188E() 383 for (group = 0; group < MAX_CHNL_GROUP_24G; group++) { Hal_ReadPowerValueFromPROM_8188E() 384 pwrInfo24G->IndexCCK_Base[rfPath][group] = PROMContent[eeAddr++]; Hal_ReadPowerValueFromPROM_8188E() 385 if (pwrInfo24G->IndexCCK_Base[rfPath][group] == 0xFF) Hal_ReadPowerValueFromPROM_8188E() 386 pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX; Hal_ReadPowerValueFromPROM_8188E() 388 for (group = 0; group < MAX_CHNL_GROUP_24G-1; group++) { Hal_ReadPowerValueFromPROM_8188E() 389 pwrInfo24G->IndexBW40_Base[rfPath][group] = PROMContent[eeAddr++]; Hal_ReadPowerValueFromPROM_8188E() 390 if (pwrInfo24G->IndexBW40_Base[rfPath][group] == 0xFF) Hal_ReadPowerValueFromPROM_8188E() 391 pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX; Hal_ReadPowerValueFromPROM_8188E() 532 u8 rfPath, ch, group; Hal_ReadTxPowerInfo88E() local 542 bIn24G = Hal_GetChnlGroup88E(ch, &group); Hal_ReadTxPowerInfo88E() 544 pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group]; Hal_ReadTxPowerInfo88E() 548 pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group]; Hal_ReadTxPowerInfo88E()
|
/linux-4.1.27/drivers/ps3/ |
H A D | ps3-lpm.c | 672 static u64 pm_signal_group_to_ps3_lv1_signal_group(u64 group) pm_signal_group_to_ps3_lv1_signal_group() argument 681 if (group < 1000) { pm_signal_group_to_ps3_lv1_signal_group() 682 if (group < 100) { pm_signal_group_to_ps3_lv1_signal_group() 683 if (20 <= group && group < 30) { pm_signal_group_to_ps3_lv1_signal_group() 685 subgroup = group - 20; pm_signal_group_to_ps3_lv1_signal_group() 686 } else if (30 <= group && group < 40) { pm_signal_group_to_ps3_lv1_signal_group() 688 subgroup = group - 30; pm_signal_group_to_ps3_lv1_signal_group() 689 } else if (40 <= group && group < 50) { pm_signal_group_to_ps3_lv1_signal_group() 691 subgroup = group - 40; pm_signal_group_to_ps3_lv1_signal_group() 692 } else if (50 <= group && group < 60) { pm_signal_group_to_ps3_lv1_signal_group() 694 subgroup = group - 50; pm_signal_group_to_ps3_lv1_signal_group() 695 } else if (60 <= group && group < 70) { pm_signal_group_to_ps3_lv1_signal_group() 697 subgroup = group - 60; pm_signal_group_to_ps3_lv1_signal_group() 698 } else if (70 <= group && group < 80) { pm_signal_group_to_ps3_lv1_signal_group() 700 subgroup = group - 70; pm_signal_group_to_ps3_lv1_signal_group() 701 } else if (80 <= group && group < 90) { pm_signal_group_to_ps3_lv1_signal_group() 703 subgroup = group - 80; pm_signal_group_to_ps3_lv1_signal_group() 705 } else if (200 <= group && group < 300) { pm_signal_group_to_ps3_lv1_signal_group() 707 subgroup = group - 200; pm_signal_group_to_ps3_lv1_signal_group() 708 } else if (600 <= group && group < 700) { pm_signal_group_to_ps3_lv1_signal_group() 711 subsubgroup = group - 650; pm_signal_group_to_ps3_lv1_signal_group() 713 } else if (6000 <= group && group < 7000) { pm_signal_group_to_ps3_lv1_signal_group() 716 subsubgroup = group - 6500; pm_signal_group_to_ps3_lv1_signal_group() 737 __LINE__, group); pm_signal_group_to_ps3_lv1_signal_group() local
|
/linux-4.1.27/net/bridge/ |
H A D | br_multicast.c | 350 __be32 group) br_ip4_multicast_alloc_query() 401 ih->code = (group ? br->multicast_last_member_interval : br_ip4_multicast_alloc_query() 404 ih->group = group; br_ip4_multicast_alloc_query() 417 const struct in6_addr *group) br_ip6_multicast_alloc_query() 473 interval = ipv6_addr_any(group) ? br_ip6_multicast_alloc_query() 482 mldq->mld_mca = *group; br_ip6_multicast_alloc_query() 514 struct br_ip *group, int hash) br_multicast_get_group() 526 if (unlikely(br_ip_equal(group, &mp->addr))) br_multicast_get_group() 587 struct net_bridge_port *port, struct br_ip *group) br_multicast_new_group() 602 hash = br_ip_hash(mdb, group); br_multicast_new_group() 603 mp = br_multicast_get_group(br, port, group, hash); br_multicast_new_group() 611 hash = br_ip_hash(mdb, group); br_multicast_new_group() 623 mp->addr = *group; br_multicast_new_group() 636 struct br_ip *group, br_multicast_new_port_group() 646 p->addr = *group; br_multicast_new_port_group() 658 struct br_ip *group) br_multicast_add_group() 671 mp = br_multicast_new_group(br, port, group); br_multicast_add_group() 691 p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY); br_multicast_add_group() 695 br_mdb_notify(br->dev, port, group, RTM_NEWMDB); br_multicast_add_group() 709 __be32 group, br_ip4_multicast_add_group() 714 if (ipv4_is_local_multicast(group)) br_ip4_multicast_add_group() 717 br_group.u.ip4 = group; br_ip4_multicast_add_group() 727 const struct in6_addr *group, br_ip6_multicast_add_group() 732 if (ipv6_addr_is_ll_all_nodes(group)) br_ip6_multicast_add_group() 735 br_group.u.ip6 = *group; br_ip6_multicast_add_group() 976 __be32 group; br_ip4_multicast_igmp3_report() local 991 group = grec->grec_mca; br_ip4_multicast_igmp3_report() 1012 err = br_ip4_multicast_add_group(br, port, group, vid); br_ip4_multicast_igmp3_report() 1233 __be32 group; br_ip4_multicast_query() local 1241 group = ih->group; br_ip4_multicast_query() 1248 group = 0; br_ip4_multicast_query() 1267 if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) { br_ip4_multicast_query() 1272 if (!group) { br_ip4_multicast_query() 1281 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid); br_ip4_multicast_query() 1322 const struct in6_addr *group = NULL; br_ip6_multicast_query() local 1345 group = &mld->mld_mca; br_ip6_multicast_query() 1353 group = &mld2q->mld2q_mca; br_ip6_multicast_query() 1358 is_general_query = group && ipv6_addr_any(group); br_ip6_multicast_query() 1375 } else if (!group) { br_ip6_multicast_query() 1379 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid); br_ip6_multicast_query() 1408 struct br_ip *group, br_multicast_leave_group() 1425 mp = br_mdb_ip_get(mdb, group); br_multicast_leave_group() 1467 br_mdb_notify(br->dev, port, group, RTM_DELMDB); br_multicast_leave_group() 1512 __be32 group, br_ip4_multicast_leave_group() 1518 if (ipv4_is_local_multicast(group)) br_ip4_multicast_leave_group() 1523 br_group.u.ip4 = group; br_ip4_multicast_leave_group() 1534 const struct in6_addr *group, br_ip6_multicast_leave_group() 1540 if (ipv6_addr_is_ll_all_nodes(group)) br_ip6_multicast_leave_group() 1545 br_group.u.ip6 = *group; br_ip6_multicast_leave_group() 1632 err = br_ip4_multicast_add_group(br, port, ih->group, vid); br_multicast_ipv4_rcv() 1641 br_ip4_multicast_leave_group(br, port, ih->group, vid); br_multicast_ipv4_rcv() 2182 struct net_bridge_port_group *group; br_multicast_list_adjacent() local 2200 hlist_for_each_entry_rcu(group, &port->mglist, mglist) { 
br_multicast_list_adjacent() 2205 entry->addr = group->addr; br_multicast_list_adjacent() 349 br_ip4_multicast_alloc_query(struct net_bridge *br, __be32 group) br_ip4_multicast_alloc_query() argument 416 br_ip6_multicast_alloc_query(struct net_bridge *br, const struct in6_addr *group) br_ip6_multicast_alloc_query() argument 512 br_multicast_get_group( struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group, int hash) br_multicast_get_group() argument 586 br_multicast_new_group(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group) br_multicast_new_group() argument 634 br_multicast_new_port_group( struct net_bridge_port *port, struct br_ip *group, struct net_bridge_port_group __rcu *next, unsigned char state) br_multicast_new_port_group() argument 656 br_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group) br_multicast_add_group() argument 707 br_ip4_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, __be32 group, __u16 vid) br_ip4_multicast_add_group() argument 725 br_ip6_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group, __u16 vid) br_ip6_multicast_add_group() argument 1406 br_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *group, struct bridge_mcast_other_query *other_query, struct bridge_mcast_own_query *own_query) br_multicast_leave_group() argument 1510 br_ip4_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, __be32 group, __u16 vid) br_ip4_multicast_leave_group() argument 1532 br_ip6_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group, __u16 vid) br_ip6_multicast_leave_group() argument
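br_multicast_get_group() above is a classic hashed lookup: br_ip_hash() picks a bucket, and the bucket is then scanned with br_ip_equal() for an exact match. A toy userspace sketch, with a 32-bit address and a trivial hash standing in for struct br_ip and the real hash:

/* Sketch only: the real code hashes struct br_ip and walks hlist buckets. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define NBUCKETS 8

struct mdb_entry_sketch {
	uint32_t group;			/* multicast group address */
	struct mdb_entry_sketch *next;	/* next entry in this bucket */
};

static unsigned int hash_group(uint32_t group)
{
	return (group * 2654435761u) >> 29;	/* fold into 0..7 */
}

static struct mdb_entry_sketch *lookup(struct mdb_entry_sketch *b[NBUCKETS],
				       uint32_t group)
{
	struct mdb_entry_sketch *e;

	for (e = b[hash_group(group)]; e; e = e->next)
		if (e->group == group)	/* exact match, as br_ip_equal() */
			return e;
	return NULL;
}

int main(void)
{
	struct mdb_entry_sketch e = { 0xe0000001u, NULL };	/* 224.0.0.1 */
	struct mdb_entry_sketch *buckets[NBUCKETS] = { NULL };

	buckets[hash_group(e.group)] = &e;
	printf("found: %s\n", lookup(buckets, 0xe0000001u) ? "yes" : "no");
	return 0;
}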
|
/linux-4.1.27/include/media/davinci/ |
H A D | vpif_types.h | 63 int *asd_sizes; /* 0-terminated array of asd group sizes */ 87 int *asd_sizes; /* 0-terminated array of asd group sizes */
|
/linux-4.1.27/net/bridge/netfilter/ |
H A D | ebt_nflog.c | 31 li.u.ulog.group = info->group; ebt_nflog_tg()
|
/linux-4.1.27/drivers/s390/cio/ |
H A D | ccwgroup.c | 48 * Remove references from ccw devices to ccw group device and from 49 * ccw group device to ccw devices. 168 * Provide an 'ungroup' attribute so the user can remove group devices no 305 * ccwgroup_create_dev() - create and register a ccw group device 307 * @gdrv: driver for the new group device 311 * Create and register a new ccw group device as a child of @parent. Slave 356 /* Don't allow a device to belong to more than one group. */ ccwgroup_create_dev() 555 * ccwgroup_driver_register() - register a ccw group driver 575 * ccwgroup_driver_unregister() - deregister a ccw group driver 601 * a ccw group device. 616 * group device. It sets the ccw device offline and also deregisters the 617 * embedding ccw group device. 625 /* If one of its devices is gone, the whole group is done for. */ ccwgroup_remove_ccwdev() 635 /* Unregister group device. */ ccwgroup_remove_ccwdev()
|
/linux-4.1.27/drivers/media/platform/vivid/ |
H A D | vivid-rds-gen.c | 43 * This RDS generator creates 57 RDS groups (one group == four RDS blocks). 45 * standard 0B group containing the PI code and PS name. 47 * Groups 4-19 and 26-41 use group 2A for the radio text. 49 * Group 56 contains the time (group 4A). 51 * All remaining groups use a filler group 15B block that just repeats
|
/linux-4.1.27/drivers/pinctrl/nomadik/ |
H A D | pinctrl-nomadik.h | 106 * struct nmk_pingroup - describes a Nomadik pin group 107 * @name: the name of this specific pin group 108 * @pins: an array of discrete physical pins used in this group, taken 110 * @num_pins: the number of pins in this group array, i.e. the number of 112 * @altsetting: the altsetting to apply to all pins in this group to
|
/linux-4.1.27/arch/arm/mach-s3c64xx/include/mach/ |
H A D | irqs.h | 128 * The IRQ_EINT(x) can be thought of as 'group 0' of the available GPIO 132 * Use IRQ_EINT_GROUP(group, offset) to get the number for use in the 157 #define IRQ_EINT_GROUP(group, no) (IRQ_EINT_GROUP##group##_BASE + (no)) 159 /* Define a group of interrupts for board-specific use (eg, for MFD
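The IRQ_EINT_GROUP macro above works by token-pasting the group number into a per-group base symbol. A self-contained sketch, with made-up base values in place of the real IRQ_EINT_GROUPn_BASE constants:

/* Sketch: the bases are stand-ins, the macro is the one from the header. */
#include <stdio.h>

#define IRQ_EINT_GROUP1_BASE 96		/* hypothetical */
#define IRQ_EINT_GROUP2_BASE 104	/* hypothetical */

#define IRQ_EINT_GROUP(group, no) (IRQ_EINT_GROUP##group##_BASE + (no))

int main(void)
{
	/* expands to (IRQ_EINT_GROUP2_BASE + (3)) == 107 */
	printf("group 2, pin 3 -> irq %d\n", IRQ_EINT_GROUP(2, 3));
	return 0;
}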
|
/linux-4.1.27/drivers/base/ |
H A D | devres.c | 62 * Release functions for devres group. These callbacks are used only 427 /* clear color of group markers in the first pass */ remove_nodes() 442 /* Second pass - Scan groups and color them. A group gets remove_nodes() 443 * color value of two iff the group is wholly contained in remove_nodes() 444 * [cur, end). That is, for a closed group, both opening and remove_nodes() 446 * opening marker is enough for an open group. remove_nodes() 520 * devres_open_group - Open a new devres group 521 * @dev: Device to open devres group for 525 * Open a new devres group for @dev with @id. For @id, using a 526 * pointer to an object which won't be used for another group is 530 * ID of the new group, NULL on failure. 558 /* Find devres group with ID @id. If @id is NULL, look for the latest. */ find_group() 582 * devres_close_group - Close a devres group 583 * @dev: Device to close devres group for 584 * @id: ID of target group, can be NULL 586 * Close the group identified by @id. If @id is NULL, the latest open 587 * group is selected. 607 * devres_remove_group - Remove a devres group 608 * @dev: Device to remove group for 609 * @id: ID of target group, can be NULL 611 * Remove the group identified by @id. If @id is NULL, the latest 612 * open group is selected. Note that removing a group doesn't affect 637 * devres_release_group - Release resources in a devres group 638 * @dev: Device to release group for 639 * @id: ID of target group, can be NULL 641 * Release all resources in the group identified by @id. If @id is 642 * NULL, the latest open group is selected. The selected group and 643 * groups properly nested inside the selected group are removed. 646 * The number of released non-group resources.
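The open/close/release comments above describe a transactional pattern: open a group, allocate managed resources into it, then seal it on success or roll everything back on failure. A minimal kernel-style sketch of the calling convention (not a complete driver; the probe body is hypothetical, while devres_open_group(), devres_close_group(), devres_release_group(), and devm_kzalloc() are the real API):

#include <linux/device.h>
#include <linux/slab.h>

static int example_probe_step(struct device *dev)
{
	void *grp, *buf;

	grp = devres_open_group(dev, NULL, GFP_KERNEL);	/* open marker */
	if (!grp)
		return -ENOMEM;

	buf = devm_kzalloc(dev, 4096, GFP_KERNEL);	/* lands in the group */
	if (!buf) {
		devres_release_group(dev, grp);	/* roll the group back */
		return -ENOMEM;
	}

	devres_close_group(dev, grp);	/* keep it; later opens nest after */
	return 0;
}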
|
/linux-4.1.27/drivers/scsi/isci/ |
H A D | remote_node_table.c | 69 * @group_table_index: This is the index to the group table from which to 75 * group. 108 * This method will clear the group index entry in the specified group index 138 * This method will set the group index bit entry in the specified group index 259 * This method sets an entire remote node group in the remote node table. 284 * @remote_node_table: This is the remote node table for which the group 286 * @group_index: This is the group index to use to find the group value. 288 * This method will return the group value for the specified group index. The 289 * bit values at the specified remote node group index. 371 * @table_index: The group index that is to be used for the search. 374 * table index will determine from which remote node group table to search. 375 * This search may fail and another group node table can be specified. The 377 * group up to the triple remote node group. If an entry is found in the 431 * @group_table_index: This is the group table index which must equal two (2) 557 * This method will release a group of three consecutive remote nodes back to
|
/linux-4.1.27/arch/powerpc/platforms/52xx/ |
H A D | mpc52xx_pic.c | 26 * group has 3 irqs, External IRQ0, slice timer 0 irq, and wake from deep 27 * sleep. Main group include the other 3 external IRQs, slice timer 1, RTC, 28 * gpios, and the general purpose timers. Peripheral group contains the 43 * value). Within each group individual irq sources are also assigned a 48 * For example, the TMR0 interrupt is irq 9 in the main group. The 52 * interrupt group called 'bestcomm'. The bestcomm group isn't physically 56 * bestcomm interrupt occurs (peripheral group, irq 0) this driver determines 64 * group, one for the peripherals group, one for the bestcomm group and one 66 * to manipulate each IRQ source, and since each group is has a separate set 70 * You'll notice that there is not an irq_chip for the critical group and 72 * interrupts even though there is no external interrupt group. The reason 74 * register even though one of the external IRQs is in the critical group and 75 * the other three are in the main group. For this reason it makes sense for 78 * group, only external interrupt is actually support at this time by this 86 * first cell is the group number [0..3], the second cell is the irq 87 * number in the group, and the third cell is the sense type (level/edge).
|
/linux-4.1.27/drivers/pinctrl/vt8500/ |
H A D | pinctrl-wmt.c | 221 int group; wmt_pctl_dt_node_to_map_func() local 229 group = wmt_pctl_find_group_by_pin(data, pin); wmt_pctl_dt_node_to_map_func() 230 if (group < 0) { wmt_pctl_dt_node_to_map_func() 231 dev_err(data->dev, "unable to match pin %d to group\n", pin); wmt_pctl_dt_node_to_map_func() 232 return group; wmt_pctl_dt_node_to_map_func() 236 map->data.mux.group = data->groups[group]; wmt_pctl_dt_node_to_map_func() 248 int group; wmt_pctl_dt_node_to_map_pull() local 257 group = wmt_pctl_find_group_by_pin(data, pin); wmt_pctl_dt_node_to_map_pull() 258 if (group < 0) { wmt_pctl_dt_node_to_map_pull() 259 dev_err(data->dev, "unable to match pin %d to group\n", pin); wmt_pctl_dt_node_to_map_pull() 260 return group; wmt_pctl_dt_node_to_map_pull() 283 map->data.configs.group_or_pin = data->groups[group]; wmt_pctl_dt_node_to_map_pull()
|
/linux-4.1.27/arch/arm/kernel/ |
H A D | perf_event_v7.c | 1155 * CC = class of events the group G is choosing from 1156 * G = group or particular event 1158 * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2 1269 unsigned int group = EVENT_GROUP(config_base); krait_evt_setup() local 1274 group_shift = group * 8; krait_evt_setup() 1277 /* Configure evtsel for the region and group */ krait_evt_setup() 1282 val += group; krait_evt_setup() 1304 static u32 clear_pmresrn_group(u32 val, int group) clear_pmresrn_group() argument 1309 group_shift = group * 8; clear_pmresrn_group() 1325 unsigned int group = EVENT_GROUP(config_base); krait_clearpmu() local 1331 val = clear_pmresrn_group(val, group); krait_clearpmu() 1336 val = clear_pmresrn_group(val, group); krait_clearpmu() 1429 unsigned int group) krait_event_to_bit() 1440 bit += group; krait_event_to_bit() 1452 * Two events can't use the same group within a pmresr register. 1462 unsigned int group = EVENT_GROUP(hwc->config_base); krait_pmu_get_event_idx() local 1468 if (group > 3 || region > 2) krait_pmu_get_event_idx() 1473 bit = krait_event_to_bit(event, region, group); krait_pmu_get_event_idx() 1491 unsigned int group = EVENT_GROUP(hwc->config_base); krait_pmu_clear_event_idx() local 1496 bit = krait_event_to_bit(event, region, group); krait_pmu_clear_event_idx() 1545 * CC = class of events the group G is choosing from 1546 * G = group or particular event 1548 * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2 1616 unsigned int group = EVENT_GROUP(config_base); scorpion_evt_setup() local 1621 group_shift = group * 8; scorpion_evt_setup() 1624 /* Configure evtsel for the region and group */ scorpion_evt_setup() 1629 val += group; scorpion_evt_setup() 1658 unsigned int group = EVENT_GROUP(config_base); scorpion_clearpmu() local 1664 val = clear_pmresrn_group(val, group); scorpion_clearpmu() 1669 val = clear_pmresrn_group(val, group); scorpion_clearpmu() 1762 unsigned int group) scorpion_event_to_bit() 1773 bit += group; scorpion_event_to_bit() 1785 * Two events can't use the same group within a pmresr register. 1794 unsigned int group = EVENT_GROUP(hwc->config_base); scorpion_pmu_get_event_idx() local 1800 if (group > 3 || region > 3) scorpion_pmu_get_event_idx() 1803 bit = scorpion_event_to_bit(event, region, group); scorpion_pmu_get_event_idx() 1821 unsigned int group = EVENT_GROUP(hwc->config_base); scorpion_pmu_clear_event_idx() local 1826 bit = scorpion_event_to_bit(event, region, group); scorpion_pmu_clear_event_idx() 1428 krait_event_to_bit(struct perf_event *event, unsigned int region, unsigned int group) krait_event_to_bit() argument 1761 scorpion_event_to_bit(struct perf_event *event, unsigned int region, unsigned int group) scorpion_event_to_bit() argument
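The RRCCG encoding described above decodes with plain shifts and masks. The macros below assume the bit layout implied by the 0x12021 example (region in bits 15:12, code in 11:4, group in 3:0) and mirror the EVENT_REGION()/EVENT_GROUP() users in the excerpt; treat the exact layout as an assumption:

/* Sketch of the decode; field layout inferred from the 0x12021 example. */
#include <stdio.h>

#define EVENT_REGION(ev)	(((ev) >> 12) & 0xf)	/* which PMRESRn */
#define EVENT_CODE(ev)		(((ev) >> 4) & 0xff)	/* event code CC */
#define EVENT_GROUP(ev)		((ev) & 0xf)		/* group G */

int main(void)
{
	unsigned int ev = 0x12021;

	printf("region %u, code %u, group %u\n",
	       EVENT_REGION(ev), EVENT_CODE(ev), EVENT_GROUP(ev));
	return 0;	/* region 2, code 2, group 1: PMRESR2 group 1, code 2 */
}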
|
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8192de/ |
H A D | hw.c | 1433 u32 rfpath, eeaddr, group, offset1, offset2; _rtl92de_readpowervalue_fromprom() local 1438 for (group = 0; group < CHANNEL_GROUP_MAX; group++) { _rtl92de_readpowervalue_fromprom() 1440 if (group < CHANNEL_GROUP_MAX_2G) { _rtl92de_readpowervalue_fromprom() 1441 pwrinfo->cck_index[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1443 pwrinfo->ht40_1sindex[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1446 pwrinfo->ht40_1sindex[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1449 pwrinfo->ht40_2sindexdiff[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1451 pwrinfo->ht20indexdiff[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1453 pwrinfo->ofdmindexdiff[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1455 pwrinfo->ht40maxoffset[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1457 pwrinfo->ht20maxoffset[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1471 for (group = 0; group < CHANNEL_GROUP_MAX_2G; group++) { _rtl92de_readpowervalue_fromprom() 1473 + group; _rtl92de_readpowervalue_fromprom() 1474 pwrinfo->cck_index[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1483 for (group = 0; group < CHANNEL_GROUP_MAX; group++) { _rtl92de_readpowervalue_fromprom() 1484 offset1 = group / 3; _rtl92de_readpowervalue_fromprom() 1485 offset2 = group % 3; _rtl92de_readpowervalue_fromprom() 1488 pwrinfo->ht40_1sindex[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1496 for (group = 0; group < CHANNEL_GROUP_MAX; group++) { _rtl92de_readpowervalue_fromprom() 1500 offset1 = group / 3; _rtl92de_readpowervalue_fromprom() 1501 offset2 = group % 3; _rtl92de_readpowervalue_fromprom() 1504 pwrinfo->ht40_2sindexdiff[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1509 pwrinfo->ht40_2sindexdiff[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1513 pwrinfo->ht20indexdiff[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1518 pwrinfo->ht20indexdiff[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1522 pwrinfo->ofdmindexdiff[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1527 pwrinfo->ofdmindexdiff[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1531 pwrinfo->ht40maxoffset[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1536 pwrinfo->ht40maxoffset[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1540 pwrinfo->ht20maxoffset[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1545 pwrinfo->ht20maxoffset[rfpath][group] = _rtl92de_readpowervalue_fromprom() 1579 u32 ch, rfPath, group; _rtl92de_read_txpower_info() local 1661 group = rtl92d_get_chnlgroup_fromarray((u8) ch); _rtl92de_read_txpower_info() 1664 pwrinfo.cck_index[rfPath][group]; _rtl92de_read_txpower_info() 1666 pwrinfo.ht40_1sindex[rfPath][group]; _rtl92de_read_txpower_info() 1668 pwrinfo.ht20indexdiff[rfPath][group]; _rtl92de_read_txpower_info() 1670 pwrinfo.ofdmindexdiff[rfPath][group]; _rtl92de_read_txpower_info() 1672 pwrinfo.ht20maxoffset[rfPath][group]; _rtl92de_read_txpower_info() 1674 pwrinfo.ht40maxoffset[rfPath][group]; _rtl92de_read_txpower_info() 1675 pwr = pwrinfo.ht40_1sindex[rfPath][group]; _rtl92de_read_txpower_info() 1676 diff = pwrinfo.ht40_2sindexdiff[rfPath][group]; _rtl92de_read_txpower_info() 2274 "set group key\n"); rtl92de_set_key()
|
/linux-4.1.27/drivers/net/wireless/rtlwifi/rtl8192ee/ |
H A D | hw.c | 1738 u8 group = 0; _rtl92ee_get_chnl_group() local 1742 group = 0; _rtl92ee_get_chnl_group() 1744 group = 1; _rtl92ee_get_chnl_group() 1746 group = 2; _rtl92ee_get_chnl_group() 1748 group = 3; _rtl92ee_get_chnl_group() 1750 group = 4; _rtl92ee_get_chnl_group() 1753 group = 0; _rtl92ee_get_chnl_group() 1755 group = 1; _rtl92ee_get_chnl_group() 1757 group = 2; _rtl92ee_get_chnl_group() 1759 group = 3; _rtl92ee_get_chnl_group() 1761 group = 4; _rtl92ee_get_chnl_group() 1763 group = 5; _rtl92ee_get_chnl_group() 1765 group = 6; _rtl92ee_get_chnl_group() 1767 group = 7; _rtl92ee_get_chnl_group() 1769 group = 8; _rtl92ee_get_chnl_group() 1771 group = 9; _rtl92ee_get_chnl_group() 1773 group = 10; _rtl92ee_get_chnl_group() 1775 group = 11; _rtl92ee_get_chnl_group() 1777 group = 12; _rtl92ee_get_chnl_group() 1779 group = 13; _rtl92ee_get_chnl_group() 1781 return group; _rtl92ee_get_chnl_group() 1790 u32 rf, addr = EEPROM_TX_PWR_INX, group, i = 0; _rtl8192ee_read_power_value_fromprom() local 1803 for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { _rtl8192ee_read_power_value_fromprom() 1804 pwr2g->index_cck_base[rf][group] = 0x2D; _rtl8192ee_read_power_value_fromprom() 1805 pwr2g->index_bw40_base[rf][group] = 0x2D; _rtl8192ee_read_power_value_fromprom() 1820 for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) _rtl8192ee_read_power_value_fromprom() 1821 pwr5g->index_bw40_base[rf][group] = 0x2A; _rtl8192ee_read_power_value_fromprom() 1845 for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { _rtl8192ee_read_power_value_fromprom() 1846 pwr2g->index_cck_base[rf][group] = hwinfo[addr++]; _rtl8192ee_read_power_value_fromprom() 1847 if (pwr2g->index_cck_base[rf][group] == 0xFF) _rtl8192ee_read_power_value_fromprom() 1848 pwr2g->index_cck_base[rf][group] = 0x2D; _rtl8192ee_read_power_value_fromprom() 1850 for (group = 0 ; group < MAX_CHNL_GROUP_24G - 1; group++) { _rtl8192ee_read_power_value_fromprom() 1851 pwr2g->index_bw40_base[rf][group] = hwinfo[addr++]; _rtl8192ee_read_power_value_fromprom() 1852 if (pwr2g->index_bw40_base[rf][group] == 0xFF) _rtl8192ee_read_power_value_fromprom() 1853 pwr2g->index_bw40_base[rf][group] = 0x2D; _rtl8192ee_read_power_value_fromprom() 1919 for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) { _rtl8192ee_read_power_value_fromprom() 1920 pwr5g->index_bw40_base[rf][group] = hwinfo[addr++]; _rtl8192ee_read_power_value_fromprom() 1921 if (pwr5g->index_bw40_base[rf][group] == 0xFF) _rtl8192ee_read_power_value_fromprom() 1922 pwr5g->index_bw40_base[rf][group] = 0xFE; _rtl8192ee_read_power_value_fromprom() 2578 "set group key\n"); rtl92ee_set_key()
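The ladder in _rtl92ee_get_chnl_group() is a plain range lookup: 2.4 GHz channels map to five groups, 5 GHz channels to fourteen. A condensed sketch of the 2.4 GHz half; the group boundaries here are an assumption for illustration, and the driver's ladder above is authoritative:

	static u8 chnl_group_24g(u8 chnl)
	{
		/* assumed grouping: 1-2, 3-5, 6-8, 9-11, 12-14 */
		if (chnl <= 2)
			return 0;
		if (chnl <= 5)
			return 1;
		if (chnl <= 8)
			return 2;
		if (chnl <= 11)
			return 3;
		return 4;
	}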
|
/linux-4.1.27/fs/nfsd/ |
H A D | nfs4acl.c | 192 unsigned short group; member in struct:posix_acl_summary 219 pas->group = pa->e_perm; FOREACH_ACL_ENTRY() 237 pas->group &= pas->mask; 265 deny &= pas.users | pas.group | pas.groups | pas.other; _posix_to_nfsv4_one() 285 deny &= pas.groups | pas.group | pas.other; _posix_to_nfsv4_one() 307 * since a user can be in more than one group. */ _posix_to_nfsv4_one() 315 ace->access_mask = mask_from_posix(pas.group, flags); _posix_to_nfsv4_one() 337 deny = ~pas.group & pas.other; _posix_to_nfsv4_one() 458 struct posix_ace_state group; member in struct:posix_acl_state 475 * named user or group, but we don't know which, so we allocate init_state() 546 low_mode_from_nfs4(state->group.allow, &pace->e_perm, flags); posix_state_to_acl() 547 add_to_mask(state, &state->group); posix_state_to_acl() 661 allow_bits(&state->group, mask); process_one_v4_ace() 663 deny_bits(&state->group, mask); process_one_v4_ace() 664 mask = state->group.deny; process_one_v4_ace() 679 deny_bits(&state->group, mask); process_one_v4_ace() 688 allow_bits(&state->group, mask); process_one_v4_ace() 695 deny_bits(&state->group, mask); process_one_v4_ace()
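The deny = ~pas.group & pas.other line addresses ACE ordering: NFSv4 ACEs apply first-match in order, so any permission the owning group must lack but EVERYONE@ will later be allowed has to be denied to GROUP@ first. A worked example with rwx mode bits:

	/* group = r-- (04), other = rw- (06):
	 * deny = ~04 & 06 = 02 (write), so a GROUP@ DENY-write ACE must be
	 * emitted before the EVERYONE@ ALLOW ACE that grants write. */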
|
/linux-4.1.27/drivers/pinctrl/berlin/ |
H A D | berlin.c | 45 unsigned group) berlin_pinctrl_get_group_name() 49 return pctrl->desc->groups[group].name; berlin_pinctrl_get_group_name() 139 const struct berlin_desc_group *group, berlin_pinctrl_find_function_by_name() 142 struct berlin_desc_function *function = group->functions; berlin_pinctrl_find_function_by_name() 156 unsigned group) berlin_pinmux_set() 159 const struct berlin_desc_group *group_desc = pctrl->desc->groups + group; berlin_pinmux_set() 216 /* compute the maximum number of functions a group can have */ berlin_pinctrl_build_state() 44 berlin_pinctrl_get_group_name(struct pinctrl_dev *pctrl_dev, unsigned group) berlin_pinctrl_get_group_name() argument 138 berlin_pinctrl_find_function_by_name(struct berlin_pinctrl *pctrl, const struct berlin_desc_group *group, const char *fname) berlin_pinctrl_find_function_by_name() argument 154 berlin_pinmux_set(struct pinctrl_dev *pctrl_dev, unsigned function, unsigned group) berlin_pinmux_set() argument
|
/linux-4.1.27/fs/xfs/libxfs/ |
H A D | xfs_ialloc.h | 62 * inode header of the allocation group and alloc_done set to true. 120 struct xfs_buf *bp, /* allocation group header buffer */ 124 * Read in the allocation group header (inode allocation section) 130 xfs_agnumber_t agno, /* allocation group number */ 131 struct xfs_buf **bpp); /* allocation group hdr buf */ 134 * Read in the allocation group header to initialise the per-ag data 141 xfs_agnumber_t agno); /* allocation group number */
|
/linux-4.1.27/drivers/pinctrl/samsung/ |
H A D | pinctrl-exynos5440.c | 79 * struct exynos5440_pin_group: represents a group of pins for pincfg setting. 80 * @name: name of the pin group, used to look up the group. 81 * @pins: the pins included in this group. 82 * @num_pins: number of pins included in this group. 147 /* check if the selector is a valid pin group selector */ exynos5440_get_group_count() 156 /* return the name of the group selected by the group selector */ exynos5440_get_group_name() 166 /* return the pin numbers associated with the specified group */ exynos5440_get_group_pins() 217 * Allocate memory for pin group name. The pin group name is derived exynos5440_dt_node_to_map() 222 dev_err(dev, "failed to alloc memory for group name\n"); exynos5440_dt_node_to_map() 267 map[*nmaps].data.mux.group = gname; exynos5440_dt_node_to_map() 295 kfree(map[idx].data.mux.group); exynos5440_dt_free_map() 349 unsigned group, bool enable) exynos5440_pinmux_setup() 371 unsigned group) exynos5440_pinmux_set_mux() 373 exynos5440_pinmux_setup(pctldev, selector, group, true); exynos5440_pinmux_set_mux() 513 /* set the pin config settings for a specified pin group */ exynos5440_pinconf_group_set() 515 unsigned group, unsigned long *configs, exynos5440_pinconf_group_set() 523 pins = priv->pin_groups[group].pins; exynos5440_pinconf_group_set() 525 for (cnt = 0; cnt < priv->pin_groups[group].num_pins; cnt++) exynos5440_pinconf_group_set() 532 /* get the pin config settings for a specified pin group */ exynos5440_pinconf_group_get() 534 unsigned int group, unsigned long *config) exynos5440_pinconf_group_get() 540 pins = priv->pin_groups[group].pins; exynos5440_pinconf_group_get() 688 dev_err(dev, "failed to allocate memory for pin group list\n"); exynos5440_pinctrl_parse_dt() 714 /* derive pin group name from the node name */ for_each_child_of_node() 718 dev_err(dev, "failed to alloc memory for group name\n"); for_each_child_of_node() 748 dev_err(dev, "failed to alloc memory for group list " for_each_child_of_node() 348 exynos5440_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector, unsigned group, bool enable) exynos5440_pinmux_setup() argument 369 exynos5440_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned selector, unsigned group) exynos5440_pinmux_set_mux() argument 514 exynos5440_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group, unsigned long *configs, unsigned num_configs) exynos5440_pinconf_group_set() argument 533 exynos5440_pinconf_group_get(struct pinctrl_dev *pctldev, unsigned int group, unsigned long *config) exynos5440_pinconf_group_get() argument
|
/linux-4.1.27/tools/perf/scripts/python/ |
H A D | event_analyzing_sample.py | 132 commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)") 139 symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)") 145 dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)") 165 commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)") 172 symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)") 177 dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)") 183 latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
|
/linux-4.1.27/mm/ |
H A D | percpu.c | 140 /* group information, used for vm allocation */ 1428 int group, v; pcpu_dump_alloc_info() local 1448 for (group = 0; group < ai->nr_groups; group++) { pcpu_dump_alloc_info() 1449 const struct pcpu_group_info *gi = &ai->groups[group]; pcpu_dump_alloc_info() 1459 printk(KERN_CONT "[%0*d] ", group_width, group); pcpu_dump_alloc_info() 1511 * same group. Dynamic VM areas will be allocated according to these 1512 * groupings. If @ai->nr_groups is zero, a single group containing 1541 int group, unit, i; pcpu_setup_first_chunk() local 1567 /* process group information and build config tables accordingly */ pcpu_setup_first_chunk() 1581 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { pcpu_setup_first_chunk() 1582 const struct pcpu_group_info *gi = &ai->groups[group]; pcpu_setup_first_chunk() 1584 group_offsets[group] = gi->base_offset; pcpu_setup_first_chunk() 1585 group_sizes[group] = gi->nr_units * ai->unit_size; pcpu_setup_first_chunk() 1767 * units in the same group. The returned configuration is guaranteed 1786 int last_allocs, group, unit; pcpu_build_alloc_info() local 1814 /* group cpus according to their proximity */ for_each_possible_cpu() 1816 group = 0; for_each_possible_cpu() 1821 if (group_map[tcpu] == group && cpu_distance_fn && for_each_possible_cpu() 1824 group++; for_each_possible_cpu() 1825 nr_groups = max(nr_groups, group + 1); for_each_possible_cpu() 1829 group_map[cpu] = group; 1830 group_cnt[group]++; 1845 for (group = 0; group < nr_groups; group++) { 1846 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 1848 wasted += this_allocs * upa - group_cnt[group]; 1868 for (group = 0; group < nr_groups; group++) 1869 nr_units += roundup(group_cnt[group], upa); 1876 for (group = 0; group < nr_groups; group++) { 1877 ai->groups[group].cpu_map = cpu_map; 1878 cpu_map += roundup(group_cnt[group], upa); 1888 for (group = 0, unit = 0; group_cnt[group]; group++) { 1889 struct pcpu_group_info *gi = &ai->groups[group]; 1899 if (group_map[cpu] == group) 1953 int group, i, rc; pcpu_embed_first_chunk() local 1970 for (group = 0; group < ai->nr_groups; group++) { pcpu_embed_first_chunk() 1971 struct pcpu_group_info *gi = &ai->groups[group]; pcpu_embed_first_chunk() 1979 /* allocate space for the whole group */ pcpu_embed_first_chunk() 1987 areas[group] = ptr; pcpu_embed_first_chunk() 1997 for (group = 0; group < ai->nr_groups; group++) { pcpu_embed_first_chunk() 1998 struct pcpu_group_info *gi = &ai->groups[group]; pcpu_embed_first_chunk() 1999 void *ptr = areas[group]; pcpu_embed_first_chunk() 2013 /* base address is now known, determine group base offsets */ pcpu_embed_first_chunk() 2015 for (group = 0; group < ai->nr_groups; group++) { pcpu_embed_first_chunk() 2016 ai->groups[group].base_offset = areas[group] - base; pcpu_embed_first_chunk() 2018 ai->groups[group].base_offset); pcpu_embed_first_chunk() 2042 for (group = 0; group < ai->nr_groups; group++) pcpu_embed_first_chunk() 2043 if (areas[group]) pcpu_embed_first_chunk() 2044 free_fn(areas[group], pcpu_embed_first_chunk() 2045 ai->groups[group].nr_units * ai->unit_size); pcpu_embed_first_chunk()
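The allocation sizing above pads each CPU group to a whole number of units-per-alloc (upa); the padding is exactly the waste the loop accumulates. A minimal sketch under the same definitions, using the kernel's roundup() macro:

	/* Total units once every group is padded to a multiple of upa. */
	static int pcpu_total_units(const int *group_cnt, int nr_groups, int upa)
	{
		int group, units = 0;

		for (group = 0; group < nr_groups; group++)
			units += roundup(group_cnt[group], upa);
		return units;
	}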
|
/linux-4.1.27/fs/quota/ |
H A D | netlink.c | 19 * Needed due to multicast group ID abuse - old code assumed 20 * the family ID was also a valid multicast group ID (which 22 * static ID for this group to make dealing with that easier.
|
/linux-4.1.27/arch/s390/kernel/ |
H A D | compat_linux.c | 90 u16, user, u16, group) COMPAT_SYSCALL_DEFINE3() 92 return sys_chown(filename, low2highuid(user), low2highgid(group)); COMPAT_SYSCALL_DEFINE3() 96 filename, u16, user, u16, group) COMPAT_SYSCALL_DEFINE3() 98 return sys_lchown(filename, low2highuid(user), low2highgid(group)); COMPAT_SYSCALL_DEFINE3() 101 COMPAT_SYSCALL_DEFINE3(s390_fchown16, unsigned int, fd, u16, user, u16, group) COMPAT_SYSCALL_DEFINE3() 103 return sys_fchown(fd, low2highuid(user), low2highgid(group)); COMPAT_SYSCALL_DEFINE3() 188 u16 group; groups16_to_user() local 193 group = (u16)from_kgid_munged(user_ns, kgid); groups16_to_user() 194 if (put_user(group, grouplist+i)) groups16_to_user() 205 u16 group; groups16_from_user() local 209 if (get_user(group, grouplist+i)) groups16_from_user() 212 kgid = make_kgid(user_ns, (gid_t)group); groups16_from_user()
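The low2highuid()/low2highgid() helpers used above widen legacy 16-bit IDs while preserving the (u16)-1 "no change" sentinel; include/linux/highuid.h defines them essentially as:

	#define low2highgid(gid) \
		((gid) == (old_gid_t)-1 ? (gid_t)-1 : (gid_t)(gid))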
|
/linux-4.1.27/arch/cris/arch-v32/mach-fs/ |
H A D | dram_init.S | 55 ; Assume that group 0 width is equal to group 1. This assumption 56 ; is wrong for group-1-only hardware (such as the grand old
|
/linux-4.1.27/arch/ia64/include/asm/sn/ |
H A D | module.h | 19 * 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int) 51 * class (0==CPU/mixed, 1==I/O), group, number 74 * group 2 bits for CPU/mixed, 3 bits for I/O
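A decoding sketch for the rack ID field described above; the exact bit positions inside the 16-bit field are assumptions for illustration (module.h's own RACK_* macros are authoritative), as is the class-dependent group width noted in the comment:

	/* Hypothetical layout: class, then group, then number, high to low.
	 * Group is 2 bits wide for CPU/mixed racks, 3 bits for I/O racks. */
	static unsigned int rack_group(unsigned int moduleid, int is_io)
	{
		unsigned int rack = (moduleid >> 16) & 0xffff;	/* bits 31-16 */

		return is_io ? (rack >> 9) & 0x7	/* assumed shifts */
			     : (rack >> 10) & 0x3;
	}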
|
/linux-4.1.27/arch/arm/mach-dove/ |
H A D | mpp.c | 24 /* Map a group to a range of GPIO pins in that group */ 120 /* Configure the group registers, enabling GPIO if sel indicates the
|
/linux-4.1.27/samples/kobject/ |
H A D | kobject-example.c | 91 * Create a group of attributes so that we can create and destroy them all 102 * An unnamed attribute group will put all of the attributes directly in 105 * attribute group.
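The pattern the sample describes looks roughly like the following: attributes go into a NULL-terminated array wrapped in an attribute_group, and leaving .name unset places the files directly in the kobject's sysfs directory rather than in a subdirectory:

	static struct attribute *attrs[] = {
		&foo_attribute.attr,	/* kobj_attributes defined earlier */
		NULL,			/* the array must be NULL-terminated */
	};

	static struct attribute_group attr_group = {
		.attrs = attrs,		/* no .name: unnamed group, no subdir */
	};

	/* in the init path: */
	retval = sysfs_create_group(example_kobj, &attr_group);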
|
/linux-4.1.27/drivers/target/ |
H A D | target_core_fabric_configfs.c | 263 struct config_group *group, target_core_mappedlun_stat_mkdir() 270 struct config_group *group, target_core_mappedlun_stat_rmdir() 330 struct config_group *group, target_fabric_make_mappedlun() 333 struct se_node_acl *se_nacl = container_of(group, target_fabric_make_mappedlun() 344 acl_ci = &group->cg_item; target_fabric_make_mappedlun() 429 struct config_group *group, target_fabric_drop_mappedlun() 495 struct config_group *group, target_fabric_make_nodeacl() 498 struct se_portal_group *se_tpg = container_of(group, target_fabric_make_nodeacl() 509 se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); target_fabric_make_nodeacl() 537 struct config_group *group, target_fabric_drop_nodeacl() 594 struct config_group *group, target_fabric_make_np() 597 struct se_portal_group *se_tpg = container_of(group, target_fabric_make_np() 607 se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); target_fabric_make_np() 619 struct config_group *group, target_fabric_drop_np() 857 struct config_group *group, target_core_port_stat_mkdir() 864 struct config_group *group, target_core_port_stat_rmdir() 882 struct config_group *group, target_fabric_make_lun() 886 struct se_portal_group *se_tpg = container_of(group, target_fabric_make_lun() 937 struct config_group *group, target_fabric_drop_lun() 1042 struct config_group *group, target_fabric_make_tpg() 1045 struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); target_fabric_make_tpg() 1054 se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); target_fabric_make_tpg() 1088 struct config_group *group, target_fabric_drop_tpg() 1144 struct config_group *group, target_fabric_make_wwn() 1147 struct target_fabric_configfs *tf = container_of(group, target_fabric_make_wwn() 1156 wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); target_fabric_make_wwn() 1177 struct config_group *group, target_fabric_drop_wwn() 262 target_core_mappedlun_stat_mkdir( struct config_group *group, const char *name) target_core_mappedlun_stat_mkdir() argument 269 target_core_mappedlun_stat_rmdir( struct config_group *group, struct config_item *item) target_core_mappedlun_stat_rmdir() argument 329 target_fabric_make_mappedlun( struct config_group *group, const char *name) target_fabric_make_mappedlun() argument 428 target_fabric_drop_mappedlun( struct config_group *group, struct config_item *item) target_fabric_drop_mappedlun() argument 494 target_fabric_make_nodeacl( struct config_group *group, const char *name) target_fabric_make_nodeacl() argument 536 target_fabric_drop_nodeacl( struct config_group *group, struct config_item *item) target_fabric_drop_nodeacl() argument 593 target_fabric_make_np( struct config_group *group, const char *name) target_fabric_make_np() argument 618 target_fabric_drop_np( struct config_group *group, struct config_item *item) target_fabric_drop_np() argument 856 target_core_port_stat_mkdir( struct config_group *group, const char *name) target_core_port_stat_mkdir() argument 863 target_core_port_stat_rmdir( struct config_group *group, struct config_item *item) target_core_port_stat_rmdir() argument 881 target_fabric_make_lun( struct config_group *group, const char *name) target_fabric_make_lun() argument 936 target_fabric_drop_lun( struct config_group *group, struct config_item *item) target_fabric_drop_lun() argument 1041 target_fabric_make_tpg( struct config_group *group, const char *name) target_fabric_make_tpg() argument 1087 target_fabric_drop_tpg( struct config_group *group, struct config_item *item) target_fabric_drop_tpg() argument 1143 target_fabric_make_wwn( struct config_group *group, const char *name) target_fabric_make_wwn() argument 1176 target_fabric_drop_wwn( struct config_group *group, struct config_item *item) target_fabric_drop_wwn() argument
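The make_*/drop_* pairs above are configfs group operations: mkdir(2) in the fabric's configfs tree calls the make callback, rmdir(2) the drop callback. Wiring them up follows the usual configfs shape, sketched here with a hypothetical ops-struct name:

	static struct configfs_group_operations example_tpg_group_ops = {
		.make_group	= target_fabric_make_tpg,	/* mkdir -> new TPG */
		.drop_item	= target_fabric_drop_tpg,	/* rmdir -> teardown */
	};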
|