Searched refs:bucket (Results 1 - 200 of 229) sorted by relevance

/linux-4.4.14/net/ceph/crush/
mapper.c 57 * bucket choose methods
59 * For each bucket algorithm, we have a "choose" method that, given a
61 * will produce an item in the bucket.
65 * Choose based on a random permutation of the bucket.
69 * calculate an actual random permutation of the bucket members.
73 static int bucket_perm_choose(struct crush_bucket *bucket, bucket_perm_choose() argument
76 unsigned int pr = r % bucket->size; bucket_perm_choose()
80 if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) { bucket_perm_choose()
81 dprintk("bucket %d new x=%d\n", bucket->id, x); bucket_perm_choose()
82 bucket->perm_x = x; bucket_perm_choose()
86 s = crush_hash32_3(bucket->hash, x, bucket->id, 0) % bucket_perm_choose()
87 bucket->size; bucket_perm_choose()
88 bucket->perm[0] = s; bucket_perm_choose()
89 bucket->perm_n = 0xffff; /* magic value, see below */ bucket_perm_choose()
93 for (i = 0; i < bucket->size; i++) bucket_perm_choose()
94 bucket->perm[i] = i; bucket_perm_choose()
95 bucket->perm_n = 0; bucket_perm_choose()
96 } else if (bucket->perm_n == 0xffff) { bucket_perm_choose()
98 for (i = 1; i < bucket->size; i++) bucket_perm_choose()
99 bucket->perm[i] = i; bucket_perm_choose()
100 bucket->perm[bucket->perm[0]] = 0; bucket_perm_choose()
101 bucket->perm_n = 1; bucket_perm_choose()
105 for (i = 0; i < bucket->perm_n; i++) bucket_perm_choose()
106 dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]); bucket_perm_choose()
107 while (bucket->perm_n <= pr) { bucket_perm_choose()
108 unsigned int p = bucket->perm_n; bucket_perm_choose()
110 if (p < bucket->size - 1) { bucket_perm_choose()
111 i = crush_hash32_3(bucket->hash, x, bucket->id, p) % bucket_perm_choose()
112 (bucket->size - p); bucket_perm_choose()
114 unsigned int t = bucket->perm[p + i]; bucket_perm_choose()
115 bucket->perm[p + i] = bucket->perm[p]; bucket_perm_choose()
116 bucket->perm[p] = t; bucket_perm_choose()
120 bucket->perm_n++; bucket_perm_choose()
122 for (i = 0; i < bucket->size; i++) bucket_perm_choose()
123 dprintk(" perm_choose %d: %d\n", i, bucket->perm[i]); bucket_perm_choose()
125 s = bucket->perm[pr]; bucket_perm_choose()
127 dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id, bucket_perm_choose()
128 bucket->size, x, r, pr, s); bucket_perm_choose()
129 return bucket->items[s]; bucket_perm_choose()
133 static int bucket_uniform_choose(struct crush_bucket_uniform *bucket, bucket_uniform_choose() argument
136 return bucket_perm_choose(&bucket->h, x, r); bucket_uniform_choose()
140 static int bucket_list_choose(struct crush_bucket_list *bucket, bucket_list_choose() argument
145 for (i = bucket->h.size-1; i >= 0; i--) { bucket_list_choose()
146 __u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i], bucket_list_choose()
147 r, bucket->h.id); bucket_list_choose()
151 i, x, r, bucket->h.items[i], bucket->item_weights[i], bucket_list_choose()
152 bucket->sum_weights[i], w); bucket_list_choose()
153 w *= bucket->sum_weights[i]; bucket_list_choose()
156 if (w < bucket->item_weights[i]) bucket_list_choose()
157 return bucket->h.items[i]; bucket_list_choose()
160 dprintk("bad list sums for bucket %d\n", bucket->h.id); bucket_list_choose()
161 return bucket->h.items[0]; bucket_list_choose()
193 static int bucket_tree_choose(struct crush_bucket_tree *bucket, bucket_tree_choose() argument
201 n = bucket->num_nodes >> 1; bucket_tree_choose()
206 w = bucket->node_weights[n]; bucket_tree_choose()
207 t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r, bucket_tree_choose()
208 bucket->h.id) * (__u64)w; bucket_tree_choose()
213 if (t < bucket->node_weights[l]) bucket_tree_choose()
219 return bucket->h.items[n >> 1]; bucket_tree_choose()
225 static int bucket_straw_choose(struct crush_bucket_straw *bucket, bucket_straw_choose() argument
233 for (i = 0; i < bucket->h.size; i++) { bucket_straw_choose()
234 draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r); bucket_straw_choose()
236 draw *= bucket->straws[i]; bucket_straw_choose()
242 return bucket->h.items[high]; bucket_straw_choose()
297 static int bucket_straw2_choose(struct crush_bucket_straw2 *bucket, bucket_straw2_choose() argument
305 for (i = 0; i < bucket->h.size; i++) { bucket_straw2_choose()
306 w = bucket->item_weights[i]; bucket_straw2_choose()
308 u = crush_hash32_3(bucket->h.hash, x, bucket_straw2_choose()
309 bucket->h.items[i], r); bucket_straw2_choose()
340 return bucket->h.items[high]; bucket_straw2_choose()
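The straw2 excerpt above shows only the loop skeleton and the final item selection; the draw computation itself is elided by the search. Below is a minimal user-space sketch of the straw2 idea, with two stated substitutions: a toy 32-bit mix in place of crush_hash32_3(), and libm log() in place of the kernel's fixed-point crush_ln(). Each item gets draw = ln(u)/weight for a pseudo-random u in (0, 1], and the largest (least negative) draw wins.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for crush_hash32_3(): any well-mixed 32-bit hash works here. */
static uint32_t toy_hash(uint32_t a, uint32_t b, uint32_t c)
{
	uint32_t h = a * 2654435761u ^ b * 2246822519u ^ c * 3266489917u;
	h ^= h >> 15;
	h *= 2654435761u;
	h ^= h >> 13;
	return h;
}

/* Return the index of the winning item, or -1 if every weight is zero. */
static int straw2_choose(const uint32_t *weights, int n, uint32_t x, uint32_t r)
{
	double best = -INFINITY;
	int high = -1;

	for (int i = 0; i < n; i++) {
		if (!weights[i])
			continue;	/* zero-weight items never win */
		/* map the hash into (0, 1]; the +1 avoids log(0) */
		double u = ((double)toy_hash(x, (uint32_t)i, r) + 1.0) / 4294967296.0;
		/* ln(u) < 0, so a larger weight gives a larger (less negative) draw */
		double draw = log(u) / (double)weights[i];

		if (draw > best) {
			best = draw;
			high = i;
		}
	}
	return high;
}

int main(void)
{
	uint32_t w[] = { 0x10000, 0x10000, 0x20000 };	/* 16.16-style weights, illustrative */

	printf("x=42 r=0 -> item %d\n", straw2_choose(w, 3, 42u, 0u));
	return 0;
}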
365 dprintk("unknown bucket %d alg %d\n", in->id, in->alg); crush_bucket_choose()
394 * @bucket: the bucket we are choosing an item from
411 struct crush_bucket *bucket, crush_choose_firstn()
428 struct crush_bucket *in = bucket; crush_choose_firstn()
436 dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d\n", crush_choose_firstn()
438 bucket->id, x, outpos, numrep, crush_choose_firstn()
448 in = bucket; /* initial bucket */ crush_choose_firstn()
459 /* bucket choose */ crush_choose_firstn()
552 /* exhaustive bucket search */ crush_choose_firstn()
593 struct crush_bucket *bucket, crush_choose_indep()
603 struct crush_bucket *in = bucket; crush_choose_indep()
613 dprintk("CHOOSE%s INDEP bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "", crush_choose_indep()
614 bucket->id, x, outpos, numrep); crush_choose_indep()
642 in = bucket; /* initial bucket */ crush_choose_indep()
648 * if the first layer chooses the same bucket crush_choose_indep()
650 * choose a different item in that bucket. crush_choose_indep()
665 /* bucket choose */ crush_choose_indep()
667 dprintk(" empty bucket\n"); crush_choose_indep()
410 crush_choose_firstn(const struct crush_map *map, struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int numrep, int type, int *out, int outpos, int out_size, unsigned int tries, unsigned int recurse_tries, unsigned int local_retries, unsigned int local_fallback_retries, int recurse_to_leaf, unsigned int vary_r, int *out2, int parent_r) crush_choose_firstn() argument
592 crush_choose_indep(const struct crush_map *map, struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int left, int numrep, int type, int *out, int outpos, unsigned int tries, unsigned int recurse_tries, int recurse_to_leaf, int *out2, int parent_r) crush_choose_indep() argument
crush.c 22 * crush_get_bucket_item_weight - Get weight of an item in given bucket
23 * @b: bucket pointer
24 * @p: item index in bucket
/linux-4.4.14/drivers/staging/lustre/lustre/obdclass/
lustre_handles.c 66 struct handle_bucket *bucket; class_handle_hash() local
93 bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK]; class_handle_hash()
94 spin_lock(&bucket->lock); class_handle_hash()
95 list_add_rcu(&h->h_link, &bucket->head); class_handle_hash()
97 spin_unlock(&bucket->lock); class_handle_hash()
127 struct handle_bucket *bucket; class_handle_unhash() local
129 bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK); class_handle_unhash()
131 spin_lock(&bucket->lock); class_handle_unhash()
133 spin_unlock(&bucket->lock); class_handle_unhash()
139 struct handle_bucket *bucket; class_handle2object() local
147 bucket = handle_hash + (cookie & HANDLE_HASH_MASK); class_handle2object()
150 list_for_each_entry_rcu(h, &bucket->head, h_link) { class_handle2object()
182 struct handle_bucket *bucket; class_handle_init() local
188 handle_hash = libcfs_kvzalloc(sizeof(*bucket) * HANDLE_HASH_SIZE, class_handle_init()
194 for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash; class_handle_init()
195 bucket--) { class_handle_init()
196 INIT_LIST_HEAD(&bucket->head); class_handle_init()
197 spin_lock_init(&bucket->lock); class_handle_init()
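The class_handle_* hits above show the pattern in miniature: a 64-bit cookie masked into a power-of-two bucket array, a per-bucket lock taken around the list update, and an init loop over the buckets. A minimal user-space sketch of that pattern follows, assuming pthread mutexes and a plain singly linked list in place of the kernel spinlock and RCU hlist; the table size is illustrative.

#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#define HANDLE_HASH_SIZE 128			/* power of two, illustrative */
#define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)

struct handle {
	uint64_t h_cookie;
	struct handle *next;
};

struct handle_bucket {
	pthread_mutex_t lock;
	struct handle *head;
};

static struct handle_bucket handle_hash[HANDLE_HASH_SIZE];

/* Mirror of the init loop above: give every bucket an empty list and a lock. */
static void handle_hash_init(void)
{
	for (size_t i = 0; i < HANDLE_HASH_SIZE; i++) {
		pthread_mutex_init(&handle_hash[i].lock, NULL);
		handle_hash[i].head = NULL;
	}
}

/* Insert under the lock of the bucket picked by the low bits of the cookie. */
static void handle_insert(struct handle *h)
{
	struct handle_bucket *bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];

	pthread_mutex_lock(&bucket->lock);
	h->next = bucket->head;
	bucket->head = h;
	pthread_mutex_unlock(&bucket->lock);
}

int main(void)
{
	static struct handle h = { .h_cookie = 0xdeadbeefULL };

	handle_hash_init();
	handle_insert(&h);
	return 0;
}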
lu_object.c 383 start = 0; /* restart from the first bucket */ lu_site_purge()
1744 * ls_obj_hash bucket.
/linux-4.4.14/fs/dlm/
debug_fs.c 371 unsigned bucket; member in struct:rsbtbl_iter
430 unsigned bucket, entry; table_seq_start() local
433 bucket = n >> 32; table_seq_start()
436 if (bucket >= ls->ls_rsbtbl_size) table_seq_start()
453 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; table_seq_start()
455 spin_lock(&ls->ls_rsbtbl[bucket].lock); table_seq_start()
462 ri->bucket = bucket; table_seq_start()
463 spin_unlock(&ls->ls_rsbtbl[bucket].lock); table_seq_start()
468 spin_unlock(&ls->ls_rsbtbl[bucket].lock); table_seq_start()
471 * move to the first rsb in the next non-empty bucket table_seq_start()
478 bucket++; table_seq_start()
481 if (bucket >= ls->ls_rsbtbl_size) { table_seq_start()
485 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; table_seq_start()
487 spin_lock(&ls->ls_rsbtbl[bucket].lock); table_seq_start()
493 ri->bucket = bucket; table_seq_start()
494 spin_unlock(&ls->ls_rsbtbl[bucket].lock); table_seq_start()
498 spin_unlock(&ls->ls_rsbtbl[bucket].lock); table_seq_start()
510 unsigned bucket; table_seq_next() local
513 bucket = n >> 32; table_seq_next()
516 * move to the next rsb in the same bucket table_seq_next()
519 spin_lock(&ls->ls_rsbtbl[bucket].lock); table_seq_next()
527 spin_unlock(&ls->ls_rsbtbl[bucket].lock); table_seq_next()
532 spin_unlock(&ls->ls_rsbtbl[bucket].lock); table_seq_next()
536 * move to the first rsb in the next non-empty bucket table_seq_next()
543 bucket++; table_seq_next()
546 if (bucket >= ls->ls_rsbtbl_size) { table_seq_next()
550 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; table_seq_next()
552 spin_lock(&ls->ls_rsbtbl[bucket].lock); table_seq_next()
558 ri->bucket = bucket; table_seq_next()
559 spin_unlock(&ls->ls_rsbtbl[bucket].lock); table_seq_next()
563 spin_unlock(&ls->ls_rsbtbl[bucket].lock); table_seq_next()
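The table_seq_start()/table_seq_next() hits above read the bucket index out of the high half of the 64-bit seq_file position (bucket = n >> 32), leaving the low half for a counter within the bucket. A small sketch of that packing; the helper names are illustrative, not dlm API.

#include <stdint.h>
#include <stdio.h>

static inline uint64_t pos_pack(uint32_t bucket, uint32_t entry)
{
	return ((uint64_t)bucket << 32) | entry;
}

static inline uint32_t pos_bucket(uint64_t n)
{
	return (uint32_t)(n >> 32);	/* same as the "bucket = n >> 32" above */
}

static inline uint32_t pos_entry(uint64_t n)
{
	return (uint32_t)n;
}

int main(void)
{
	uint64_t n = pos_pack(7, 42);

	printf("bucket=%u entry=%u\n", pos_bucket(n), pos_entry(n));
	return 0;
}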
dir.c 203 uint32_t hash, bucket; find_rsb_root() local
207 bucket = hash & (ls->ls_rsbtbl_size - 1); find_rsb_root()
209 spin_lock(&ls->ls_rsbtbl[bucket].lock); find_rsb_root()
210 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); find_rsb_root()
212 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, find_rsb_root()
214 spin_unlock(&ls->ls_rsbtbl[bucket].lock); find_rsb_root()
lock.c 352 uint32_t bucket = r->res_bucket; put_rsb() local
354 spin_lock(&ls->ls_rsbtbl[bucket].lock); put_rsb()
356 spin_unlock(&ls->ls_rsbtbl[bucket].lock); put_rsb()
5470 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) find_grant_rsb() argument
5475 spin_lock(&ls->ls_rsbtbl[bucket].lock); find_grant_rsb()
5476 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { find_grant_rsb()
5486 spin_unlock(&ls->ls_rsbtbl[bucket].lock); find_grant_rsb()
5489 spin_unlock(&ls->ls_rsbtbl[bucket].lock); find_grant_rsb()
5513 int bucket = 0; dlm_recover_grant() local
5519 r = find_grant_rsb(ls, bucket); dlm_recover_grant()
5521 if (bucket == ls->ls_rsbtbl_size - 1) dlm_recover_grant()
5523 bucket++; dlm_recover_grant()
dlm_internal.h 508 * The max number of resources per rsbtbl bucket that shrink will attempt
/linux-4.4.14/net/sched/
sch_hhf.c 20 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
22 * in which the heavy-hitter bucket is served with less weight.
60 * dispatched to the heavy-hitter bucket accordingly.
67 * bucket.
70 * to the non-heavy-hitter bucket.
73 * send p to the heavy-hitter bucket.
104 WDRR_BUCKET_FOR_HH = 0, /* bucket id for heavy-hitters */
105 WDRR_BUCKET_FOR_NON_HH = 1 /* bucket id for non-heavy-hitters */
327 /* Removes one skb from head of bucket. */ dequeue_head()
328 static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket) dequeue_head() argument
330 struct sk_buff *skb = bucket->head; dequeue_head()
332 bucket->head = skb->next; dequeue_head()
337 /* Tail-adds skb to bucket. */ bucket_add()
338 static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb) bucket_add() argument
340 if (bucket->head == NULL) bucket_add()
341 bucket->head = skb; bucket_add()
343 bucket->tail->next = skb; bucket_add()
344 bucket->tail = skb; bucket_add()
351 struct wdrr_bucket *bucket; hhf_drop() local
354 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; hhf_drop()
355 if (!bucket->head) hhf_drop()
356 bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH]; hhf_drop()
358 if (bucket->head) { hhf_drop()
359 struct sk_buff *skb = dequeue_head(bucket); hhf_drop()
367 /* Return id of the bucket from which the packet was dropped. */ hhf_drop()
368 return bucket - q->buckets; hhf_drop()
384 struct wdrr_bucket *bucket; hhf_enqueue() local
389 bucket = &q->buckets[idx]; hhf_enqueue()
390 bucket_add(bucket, skb); hhf_enqueue()
393 if (list_empty(&bucket->bucketchain)) { hhf_enqueue()
401 /* Always move heavy-hitters to old bucket. */ hhf_enqueue()
403 list_add_tail(&bucket->bucketchain, &q->old_buckets); hhf_enqueue()
406 list_add_tail(&bucket->bucketchain, &q->new_buckets); hhf_enqueue()
408 bucket->deficit = weight * q->quantum; hhf_enqueue()
416 * bucket. hhf_enqueue()
430 struct wdrr_bucket *bucket; hhf_dequeue() local
440 bucket = list_first_entry(head, struct wdrr_bucket, bucketchain); hhf_dequeue()
442 if (bucket->deficit <= 0) { hhf_dequeue()
443 int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ? hhf_dequeue()
446 bucket->deficit += weight * q->quantum; hhf_dequeue()
447 list_move_tail(&bucket->bucketchain, &q->old_buckets); hhf_dequeue()
451 if (bucket->head) { hhf_dequeue()
452 skb = dequeue_head(bucket); hhf_dequeue()
460 list_move_tail(&bucket->bucketchain, &q->old_buckets); hhf_dequeue()
462 list_del_init(&bucket->bucketchain); hhf_dequeue()
466 bucket->deficit -= qdisc_pkt_len(skb); hhf_dequeue()
657 struct wdrr_bucket *bucket = q->buckets + i; hhf_init() local
659 INIT_LIST_HEAD(&bucket->bucketchain); hhf_init()
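The hhf_dequeue() hits above show the weighted DRR accounting: when a bucket's deficit is exhausted it is refilled with weight * quantum and rotated to the old-buckets list, otherwise the packet length is charged against the deficit. A stripped-down sketch of just that accounting; the weights, initial deficit, and quantum are illustrative, not the qdisc defaults.

#include <stdio.h>

enum { BUCKET_FOR_HH = 0, BUCKET_FOR_NON_HH = 1 };	/* mirrors the two wdrr buckets */

struct wdrr_bucket {
	int deficit;
	int weight;
};

/* Charge one packet of pkt_len bytes against a bucket.  Returns 1 if the
 * packet can be sent now; returns 0 if the deficit was exhausted, in which
 * case the bucket is refilled (and, in the qdisc, rotated to the old list). */
static int wdrr_charge(struct wdrr_bucket *b, int quantum, int pkt_len)
{
	if (b->deficit <= 0) {
		b->deficit += b->weight * quantum;
		return 0;
	}
	b->deficit -= pkt_len;
	return 1;
}

int main(void)
{
	struct wdrr_bucket buckets[2] = {
		[BUCKET_FOR_HH]     = { .deficit = 0, .weight = 1 },
		[BUCKET_FOR_NON_HH] = { .deficit = 0, .weight = 3 },
	};
	int quantum = 1514;

	for (int i = 0; i < 6; i++) {
		int ok = wdrr_charge(&buckets[BUCKET_FOR_HH], quantum, 1000);

		printf("send=%d deficit=%d\n", ok, buckets[BUCKET_FOR_HH].deficit);
	}
	return 0;
}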
sch_qfq.c 89 for the scheduler: bitmaps and bucket lists.
901 unsigned int i; /* slot index in the bucket list */ qfq_slot_insert()
938 * Returns the first aggregate in the first non-empty bucket of the
939 * group. As a side effect, adjusts the bucket list so the first
940 * non-empty bucket is at position 0 in full_slots.
962 * adjust the bucket list. When the start time of a group decreases,
1291 * Insert agg in the correct bucket. qfq_schedule_agg()
1293 * bucket list and simply go to the insertion phase. qfq_schedule_agg()
1295 * in the bucket list, and also recompute the group state. qfq_schedule_agg()
1361 * not in the front bucket, or if the latter has other aggregates in
1362 * the front bucket, we can simply remove the aggregate with no other
cls_rsvp.h 25 destination address and protocol ID, every bucket contains a list
29 Every bucket has a smaller hash table keyed by source address
31 Every bucket is again a list of "RSVP flows", selected by
212 /* And wildcard bucket... */ rsvp_classify()
sch_htb.c 2 * net/sched/sch_htb.c Hierarchical token bucket, feed tree version
101 s64 buffer, cbuffer;/* token bucket depth/rate */
123 /* token bucket parameters */
638 * borrowing from "level". It accounts bytes to ceil leaky bucket for
639 * leaf and all ancestors and to rate bucket for ancestors at levels
cls_route.c 45 /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
sch_tbf.c 105 s64 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
sch_netem.c 54 bucket or other rate control.
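sch_htb and sch_tbf above both describe a token bucket whose depth must cover at least one MTU. A minimal sketch of that mechanism follows, with floating-point seconds and byte counts standing in for the kernel's scaled integer time; the field names are illustrative.

#include <stdint.h>
#include <stdio.h>

struct tbucket {
	double rate;		/* bytes per second */
	double depth;		/* max burst, bytes (must cover at least the MTU) */
	double tokens;		/* bytes currently available */
	double last;		/* time of the last update, seconds */
};

/* Returns 1 if the packet may pass now, 0 if it must be delayed or dropped. */
static int tbucket_admit(struct tbucket *tb, double now, double pkt_len)
{
	tb->tokens += (now - tb->last) * tb->rate;	/* accrue tokens since last packet */
	if (tb->tokens > tb->depth)
		tb->tokens = tb->depth;			/* bucket is full: burst is capped */
	tb->last = now;

	if (tb->tokens < pkt_len)
		return 0;
	tb->tokens -= pkt_len;
	return 1;
}

int main(void)
{
	struct tbucket tb = { .rate = 125000.0, .depth = 3000.0, .tokens = 3000.0, .last = 0.0 };

	printf("t=0.00s 1500B -> %d\n", tbucket_admit(&tb, 0.00, 1500.0));
	printf("t=0.00s 1500B -> %d\n", tbucket_admit(&tb, 0.00, 1500.0));
	printf("t=0.00s 1500B -> %d\n", tbucket_admit(&tb, 0.00, 1500.0));	/* bucket empty */
	printf("t=0.02s 1500B -> %d\n", tbucket_admit(&tb, 0.02, 1500.0));	/* 2500B accrued */
	return 0;
}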
/linux-4.4.14/fs/ocfs2/
xattr.c 70 /* The actual buffers that make up the bucket */
73 /* How many blocks make up one bucket for this filesystem */
135 struct ocfs2_xattr_bucket *bucket; member in struct:ocfs2_xattr_search
289 struct ocfs2_xattr_bucket *bucket,
311 struct ocfs2_xattr_bucket *bucket,
332 struct ocfs2_xattr_bucket *bucket; ocfs2_xattr_bucket_new() local
337 bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS); ocfs2_xattr_bucket_new()
338 if (bucket) { ocfs2_xattr_bucket_new()
339 bucket->bu_inode = inode; ocfs2_xattr_bucket_new()
340 bucket->bu_blocks = blks; ocfs2_xattr_bucket_new()
343 return bucket; ocfs2_xattr_bucket_new()
346 static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket) ocfs2_xattr_bucket_relse() argument
350 for (i = 0; i < bucket->bu_blocks; i++) { ocfs2_xattr_bucket_relse()
351 brelse(bucket->bu_bhs[i]); ocfs2_xattr_bucket_relse()
352 bucket->bu_bhs[i] = NULL; ocfs2_xattr_bucket_relse()
356 static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket) ocfs2_xattr_bucket_free() argument
358 if (bucket) { ocfs2_xattr_bucket_free()
359 ocfs2_xattr_bucket_relse(bucket); ocfs2_xattr_bucket_free()
360 bucket->bu_inode = NULL; ocfs2_xattr_bucket_free()
361 kfree(bucket); ocfs2_xattr_bucket_free()
366 * A bucket that has never been written to disk doesn't need to be
371 static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket, ocfs2_init_xattr_bucket() argument
376 for (i = 0; i < bucket->bu_blocks; i++) { ocfs2_init_xattr_bucket()
377 bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb, ocfs2_init_xattr_bucket()
379 if (!bucket->bu_bhs[i]) { ocfs2_init_xattr_bucket()
385 if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode), ocfs2_init_xattr_bucket()
386 bucket->bu_bhs[i])) { ocfs2_init_xattr_bucket()
388 ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode), ocfs2_init_xattr_bucket()
389 bucket->bu_bhs[i]); ocfs2_init_xattr_bucket()
391 set_buffer_uptodate(bucket->bu_bhs[i]); ocfs2_init_xattr_bucket()
392 ocfs2_set_buffer_uptodate(INODE_CACHE(bucket->bu_inode), ocfs2_init_xattr_bucket()
393 bucket->bu_bhs[i]); ocfs2_init_xattr_bucket()
399 ocfs2_xattr_bucket_relse(bucket); ocfs2_init_xattr_bucket()
403 /* Read the xattr bucket at xb_blkno */ ocfs2_read_xattr_bucket()
404 static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket, ocfs2_read_xattr_bucket() argument
409 rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno, ocfs2_read_xattr_bucket()
410 bucket->bu_blocks, bucket->bu_bhs, 0, ocfs2_read_xattr_bucket()
413 spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock); ocfs2_read_xattr_bucket()
414 rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb, ocfs2_read_xattr_bucket()
415 bucket->bu_bhs, ocfs2_read_xattr_bucket()
416 bucket->bu_blocks, ocfs2_read_xattr_bucket()
417 &bucket_xh(bucket)->xh_check); ocfs2_read_xattr_bucket()
418 spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock); ocfs2_read_xattr_bucket()
424 ocfs2_xattr_bucket_relse(bucket); ocfs2_read_xattr_bucket()
429 struct ocfs2_xattr_bucket *bucket, ocfs2_xattr_bucket_journal_access()
434 for (i = 0; i < bucket->bu_blocks; i++) { ocfs2_xattr_bucket_journal_access()
436 INODE_CACHE(bucket->bu_inode), ocfs2_xattr_bucket_journal_access()
437 bucket->bu_bhs[i], type); ocfs2_xattr_bucket_journal_access()
448 struct ocfs2_xattr_bucket *bucket) ocfs2_xattr_bucket_journal_dirty()
452 spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock); ocfs2_xattr_bucket_journal_dirty()
453 ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb, ocfs2_xattr_bucket_journal_dirty()
454 bucket->bu_bhs, bucket->bu_blocks, ocfs2_xattr_bucket_journal_dirty()
455 &bucket_xh(bucket)->xh_check); ocfs2_xattr_bucket_journal_dirty()
456 spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock); ocfs2_xattr_bucket_journal_dirty()
458 for (i = 0; i < bucket->bu_blocks; i++) ocfs2_xattr_bucket_journal_dirty()
459 ocfs2_journal_dirty(handle, bucket->bu_bhs[i]); ocfs2_xattr_bucket_journal_dirty()
664 * xattr bucket, otherwise reserve one metadata block ocfs2_calc_xattr_init()
1200 xs->bucket = ocfs2_xattr_bucket_new(inode); ocfs2_xattr_block_get()
1201 if (!xs->bucket) { ocfs2_xattr_block_get()
1231 bucket_xh(xs->bucket), ocfs2_xattr_block_get()
1239 xs->base = bucket_block(xs->bucket, block_off); ocfs2_xattr_block_get()
1257 ocfs2_xattr_bucket_free(xs->bucket); ocfs2_xattr_block_get()
1690 struct ocfs2_xattr_bucket *bucket = loc->xl_storage; ocfs2_xa_bucket_journal_access() local
1692 return ocfs2_xattr_bucket_journal_access(handle, bucket, type); ocfs2_xa_bucket_journal_access()
1698 struct ocfs2_xattr_bucket *bucket = loc->xl_storage; ocfs2_xa_bucket_journal_dirty() local
1700 ocfs2_xattr_bucket_journal_dirty(handle, bucket); ocfs2_xa_bucket_journal_dirty()
1706 struct ocfs2_xattr_bucket *bucket = loc->xl_storage; ocfs2_xa_bucket_offset_pointer() local
1709 /* The header is at the front of the bucket */ ocfs2_xa_bucket_offset_pointer()
1713 return bucket_block(bucket, block) + block_offset; ocfs2_xa_bucket_offset_pointer()
1725 struct ocfs2_xattr_bucket *bucket = loc->xl_storage; ocfs2_xa_bucket_get_free_start() local
1726 return le16_to_cpu(bucket_xh(bucket)->xh_free_start); ocfs2_xa_bucket_get_free_start()
1755 * reuse. They live as holes until the bucket fills, and then ocfs2_xa_bucket_check_space()
1756 * the bucket is defragmented. However, the bucket can reclaim ocfs2_xa_bucket_check_space()
1847 struct ocfs2_xattr_bucket *bucket = loc->xl_storage; ocfs2_xa_bucket_fill_value_buf() local
1856 /* We expect the bucket to be filled in */ ocfs2_xa_bucket_fill_value_buf()
1857 BUG_ON(!bucket->bu_bhs[block_offset]); ocfs2_xa_bucket_fill_value_buf()
1860 vb->vb_bh = bucket->bu_bhs[block_offset]; ocfs2_xa_bucket_fill_value_buf()
1902 * transaction and open a new one. If this is a bucket, truncate ocfs2_xa_value_truncate()
1904 * the caller is expecting to dirty the entire bucket. So we must ocfs2_xa_value_truncate()
1929 * important for an empty bucket, as it keeps track of the ocfs2_xa_remove_entry()
1930 * bucket's hash value. It doesn't hurt empty block storage. ocfs2_xa_remove_entry()
2289 struct ocfs2_xattr_bucket *bucket, ocfs2_init_xattr_bucket_xa_loc()
2292 loc->xl_inode = bucket->bu_inode; ocfs2_init_xattr_bucket_xa_loc()
2294 loc->xl_storage = bucket; ocfs2_init_xattr_bucket_xa_loc()
2295 loc->xl_header = bucket_xh(bucket); ocfs2_init_xattr_bucket_xa_loc()
3077 bucket_xh(xbs->bucket), ocfs2_calc_xattr_set_need()
3080 base = bucket_block(xbs->bucket, block_off); ocfs2_calc_xattr_set_need()
3195 * This cluster will be used either for new bucket or for ocfs2_calc_xattr_set_need()
3197 * If the cluster size is the same as the bucket size, one ocfs2_calc_xattr_set_need()
3198 * more is needed since we may need to extend the bucket ocfs2_calc_xattr_set_need()
3467 * In extreme situation, may need xattr bucket when ocfs2_xattr_set_handle()
3469 * the credits for bucket in mknod. ocfs2_xattr_set_handle()
3472 xbs.bucket = ocfs2_xattr_bucket_new(inode); ocfs2_xattr_set_handle()
3473 if (!xbs.bucket) { ocfs2_xattr_set_handle()
3498 ocfs2_xattr_bucket_free(xbs.bucket); ocfs2_xattr_set_handle()
3546 * bucket. ocfs2_xattr_set()
3548 xbs.bucket = ocfs2_xattr_bucket_new(inode); ocfs2_xattr_set()
3549 if (!xbs.bucket) { ocfs2_xattr_set()
3656 ocfs2_xattr_bucket_free(xbs.bucket); ocfs2_xattr_set()
3726 struct ocfs2_xattr_bucket *bucket,
3730 struct ocfs2_xattr_bucket *bucket, ocfs2_find_xe_in_bucket()
3738 struct ocfs2_xattr_header *xh = bucket_xh(bucket); ocfs2_find_xe_in_bucket()
3744 * We don't use binary search in the bucket because there ocfs2_find_xe_in_bucket()
3772 xe_name = bucket_block(bucket, block_off) + new_offset; ocfs2_find_xe_in_bucket()
3787 * The ocfs2_xattr_header.xh_num_buckets of the first bucket contains
3791 * hash is in the gap of 2 buckets, return the lower bucket.
3807 int low_bucket = 0, bucket, high_bucket; ocfs2_xattr_bucket_find() local
3830 bucket = (low_bucket + high_bucket) / 2; ocfs2_xattr_bucket_find()
3831 blkno = p_blkno + bucket * blk_per_bucket; ocfs2_xattr_bucket_find()
3841 high_bucket = bucket - 1; ocfs2_xattr_bucket_find()
3847 * bucket is larger than the search one. for an empty ocfs2_xattr_bucket_find()
3848 * bucket, the last one is also the first one. ocfs2_xattr_bucket_find()
3859 low_bucket = bucket + 1; ocfs2_xattr_bucket_find()
3863 /* the searched xattr should reside in this bucket if exists. */ ocfs2_xattr_bucket_find()
3875 * Record the bucket we have found. ocfs2_xattr_bucket_find()
3877 * always set it to the previous bucket. ocfs2_xattr_bucket_find()
3883 ret = ocfs2_read_xattr_bucket(xs->bucket, lower_blkno); ocfs2_xattr_bucket_find()
3889 xs->header = bucket_xh(xs->bucket); ocfs2_xattr_bucket_find()
3890 xs->base = bucket_block(xs->bucket, 0); ocfs2_xattr_bucket_find()
3897 (unsigned long long)bucket_blkno(xs->bucket), ocfs2_xattr_bucket_find()
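The ocfs2_xattr_bucket_find() hits above describe a binary search over buckets ordered by the name hash of each bucket's first entry, resolving to the lower bucket when the hash falls in the gap between two buckets. A simplified, self-contained sketch of that search over an in-memory array of first-entry hashes; the array and its contents are stand-ins, not the on-disk layout.

#include <stdint.h>
#include <stdio.h>

/* First-entry name hash of each bucket, sorted ascending (illustrative data). */
static const uint32_t bucket_first_hash[] = { 0x100, 0x300, 0x700, 0x900 };

/* Return the bucket that should hold name_hash: the last bucket whose
 * first-entry hash is <= name_hash, or bucket 0 if no such bucket exists. */
static int find_xattr_bucket(int num_buckets, uint32_t name_hash)
{
	int low = 0, high = num_buckets - 1, lower = 0;

	while (low <= high) {
		int bucket = (low + high) / 2;
		uint32_t h = bucket_first_hash[bucket];

		if (h > name_hash) {
			high = bucket - 1;
		} else {
			lower = bucket;		/* candidate: lies at or below the target */
			if (h == name_hash)
				break;
			low = bucket + 1;
		}
	}
	return lower;
}

int main(void)
{
	printf("hash 0x500 -> bucket %d\n", find_xattr_bucket(4, 0x500));	/* gap: lower bucket 1 */
	printf("hash 0x700 -> bucket %d\n", find_xattr_bucket(4, 0x700));	/* exact match: bucket 2 */
	return 0;
}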
3960 struct ocfs2_xattr_bucket *bucket; ocfs2_iterate_xattr_buckets() local
3962 bucket = ocfs2_xattr_bucket_new(inode); ocfs2_iterate_xattr_buckets()
3963 if (!bucket) { ocfs2_iterate_xattr_buckets()
3972 for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) { ocfs2_iterate_xattr_buckets()
3973 ret = ocfs2_read_xattr_bucket(bucket, blkno); ocfs2_iterate_xattr_buckets()
3980 * The real bucket num in this series of blocks is stored ocfs2_iterate_xattr_buckets()
3981 * in the 1st bucket. ocfs2_iterate_xattr_buckets()
3984 num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets); ocfs2_iterate_xattr_buckets()
3987 le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash)); ocfs2_iterate_xattr_buckets()
3989 ret = func(inode, bucket, para); ocfs2_iterate_xattr_buckets()
3995 ocfs2_xattr_bucket_relse(bucket); ocfs2_iterate_xattr_buckets()
4000 ocfs2_xattr_bucket_free(bucket); ocfs2_iterate_xattr_buckets()
4030 struct ocfs2_xattr_bucket *bucket, ocfs2_list_xattr_bucket()
4038 for (i = 0 ; i < le16_to_cpu(bucket_xh(bucket)->xh_count); i++) { ocfs2_list_xattr_bucket()
4039 struct ocfs2_xattr_entry *entry = &bucket_xh(bucket)->xh_entries[i]; ocfs2_list_xattr_bucket()
4045 bucket_xh(bucket), ocfs2_list_xattr_bucket()
4052 name = (const char *)bucket_block(bucket, block_off) + ocfs2_list_xattr_bucket()
4163 * When the ocfs2_xattr_block is filled up, new bucket will be created
4164 * and all the xattr entries will be moved to the new bucket.
4165 * The header goes at the start of the bucket, and the names+values are
4172 struct ocfs2_xattr_bucket *bucket) ocfs2_cp_xattr_block_to_bucket()
4181 struct ocfs2_xattr_header *xh = bucket_xh(bucket); ocfs2_cp_xattr_block_to_bucket()
4184 char *target = bucket_block(bucket, blks - 1); ocfs2_cp_xattr_block_to_bucket()
4188 (unsigned long long)bucket_blkno(bucket)); ocfs2_cp_xattr_block_to_bucket()
4191 memset(bucket_block(bucket, i), 0, blocksize); ocfs2_cp_xattr_block_to_bucket()
4213 target = bucket_block(bucket, 0); ocfs2_cp_xattr_block_to_bucket()
4235 * While if the entry is in index b-tree, "bucket" indicates the
4247 xs->header = bucket_xh(xs->bucket); ocfs2_xattr_update_xattr_search()
4248 xs->base = bucket_block(xs->bucket, 0); ocfs2_xattr_update_xattr_search()
4277 BUG_ON(!xs->bucket); ocfs2_xattr_create_index_block()
4301 * The bucket may spread in many blocks, and ocfs2_xattr_create_index_block()
4303 * in the whole bucket(one for entry and one for data). ocfs2_xattr_create_index_block()
4309 ret = ocfs2_init_xattr_bucket(xs->bucket, blkno, 1); ocfs2_xattr_create_index_block()
4315 ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket, ocfs2_xattr_create_index_block()
4322 ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket); ocfs2_xattr_create_index_block()
4323 ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket); ocfs2_xattr_create_index_block()
4366 * defrag a xattr bucket if we find that the bucket has some
4368 * We will move all the name/value pairs to the end of the bucket
4373 struct ocfs2_xattr_bucket *bucket) ocfs2_defrag_xattr_bucket()
4379 u64 blkno = bucket_blkno(bucket); ocfs2_defrag_xattr_bucket()
4397 for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize) ocfs2_defrag_xattr_bucket()
4398 memcpy(buf, bucket_block(bucket, i), blocksize); ocfs2_defrag_xattr_bucket()
4400 ret = ocfs2_xattr_bucket_journal_access(handle, bucket, ocfs2_defrag_xattr_bucket()
4424 /* Move all name/values to the end of the bucket. */ ocfs2_defrag_xattr_bucket()
4447 "bucket %llu\n", (unsigned long long)blkno); ocfs2_defrag_xattr_bucket()
4453 "bucket %llu\n", (unsigned long long)blkno); ocfs2_defrag_xattr_bucket()
4467 for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize) ocfs2_defrag_xattr_bucket()
4468 memcpy(bucket_block(bucket, i), buf, blocksize); ocfs2_defrag_xattr_bucket()
4469 ocfs2_xattr_bucket_journal_dirty(handle, bucket); ocfs2_defrag_xattr_bucket()
4479 * clusters contains more than bucket, we can easily split one cluster
4480 * at a bucket boundary. So we take the last cluster of the existing
4486 * extent's bucket count. header_bh is the bucket were we were hoping
4487 * to insert our xattr. If the bucket move places the target in the new
4525 /* This is the first bucket that got moved */ ocfs2_mv_xattr_bucket_cross_cluster()
4529 * If the target bucket was part of the moved buckets, we need to ocfs2_mv_xattr_bucket_cross_cluster()
4533 /* Find the block for the new target bucket */ ocfs2_mv_xattr_bucket_cross_cluster()
4560 * Find the suitable pos when we divide a bucket into 2.
4562 * in the same bucket.
4602 * Move some xattrs in old bucket(blk) to new bucket(new_blk).
4603 * first_hash will record the 1st hash of the new bucket.
4607 * same bucket. If all the xattrs in this bucket have the same hash
4608 * value, the new bucket will be initialized as an empty one and the
4681 * initialized a new empty bucket here. ocfs2_divide_xattr_bucket()
4683 * that of the last entry in the previous bucket. ocfs2_divide_xattr_bucket()
4696 /* copy the whole bucket to the new first. */ ocfs2_divide_xattr_bucket()
4699 /* update the new bucket. */ ocfs2_divide_xattr_bucket()
4704 * the old bucket first. ocfs2_divide_xattr_bucket()
4716 * Now begin the modification to the new bucket. ocfs2_divide_xattr_bucket()
4718 * In the new bucket, We just move the xattr entry to the beginning ocfs2_divide_xattr_bucket()
4720 * bucket, and they will be removed when ocfs2_defrag_xattr_bucket is ocfs2_divide_xattr_bucket()
4736 /* Calculate xh_free_start for the new bucket. */ ocfs2_divide_xattr_bucket()
4754 /* store the first_hash of the new bucket. */ ocfs2_divide_xattr_bucket()
4759 * Now only update the 1st block of the old bucket. If we ocfs2_divide_xattr_bucket()
4760 * just added a new empty bucket, there is no need to modify ocfs2_divide_xattr_bucket()
4783 * Copy xattr from one bucket to another bucket.
4834 * how we get here, and the bucket isn't really new. ocfs2_cp_xattr_bucket()
4882 /* The first bucket of the original extent */ ocfs2_mv_xattr_buckets()
4884 /* The first bucket of the new extent */ ocfs2_mv_xattr_buckets()
4899 * We need to update the first bucket of the old extent and all ocfs2_mv_xattr_buckets()
4928 * Get the new bucket ready before we dirty anything ocfs2_mv_xattr_buckets()
4962 * This function should only be called when bucket size == cluster size.
4982 /* Move half of the xattr in start_blk to the next bucket. */ ocfs2_divide_xattr_cluster()
4996 * to extend the insert bucket.
5000 * 1. If cluster size > bucket size, that means the previous cluster has more
5001 * than 1 bucket, so just move half nums of bucket into the new cluster and
5002 * update the first_bh and header_bh if the insert bucket has been moved
5078 * indicates the bucket we will insert the new xattrs. They will be updated
5171 * We are given an extent. 'first' is the bucket at the very front of
5172 * the extent. The extent has space for an additional bucket past
5174 * of the target bucket. We wish to shift every bucket past the target
5176 * target, we split the target between itself and the now-empty bucket
5195 /* The extent must have room for an additional bucket */ ocfs2_extend_xattr_bucket()
5199 /* end_blk points to the last existing bucket */ ocfs2_extend_xattr_bucket()
5203 * end_blk is the start of the last existing bucket. ocfs2_extend_xattr_bucket()
5204 * Thus, (end_blk - target_blk) covers the target bucket and ocfs2_extend_xattr_bucket()
5205 * every bucket after it up to, but not including, the last ocfs2_extend_xattr_bucket()
5206 * existing bucket. Then we add the last existing bucket, the ocfs2_extend_xattr_bucket()
5207 * new bucket, and the first bucket (3 * blk_per_bucket). ocfs2_extend_xattr_bucket()
5231 /* Move half of the xattr in target_blkno to the next bucket. */ ocfs2_extend_xattr_bucket()
5243 * Add new xattr bucket in an extent record and adjust the buckets
5245 * bucket we want to insert into.
5248 * one. Half of target's xattrs will be moved to the next bucket.
5270 /* The bucket at the front of the extent */ ocfs2_add_new_xattr_bucket()
5276 /* The first bucket of the original extent */ ocfs2_add_new_xattr_bucket()
5300 * This can move first+target if the target bucket moves ocfs2_add_new_xattr_bucket()
5334 * Truncate the specified xe_off entry in xattr bucket.
5335 * bucket is indicated by header_bh and len is the new length.
5341 struct ocfs2_xattr_bucket *bucket, ocfs2_xattr_bucket_value_truncate()
5349 struct ocfs2_xattr_header *xh = bucket_xh(bucket); ocfs2_xattr_bucket_value_truncate()
5367 vb.vb_bh = bucket->bu_bhs[value_blk]; ocfs2_xattr_bucket_value_truncate()
5374 * From here on out we have to dirty the bucket. The generic ocfs2_xattr_bucket_value_truncate()
5375 * value calls only modify one of the bucket's bhs, but we need ocfs2_xattr_bucket_value_truncate()
5376 * to send the bucket at once. So if they error, they *could* have ocfs2_xattr_bucket_value_truncate()
5378 * the whole bucket. This leaves us in a consistent state. ocfs2_xattr_bucket_value_truncate()
5381 (unsigned long long)bucket_blkno(bucket), xe_off, len); ocfs2_xattr_bucket_value_truncate()
5388 ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket, ocfs2_xattr_bucket_value_truncate()
5397 ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket); ocfs2_xattr_bucket_value_truncate()
5499 * check whether the xattr bucket is filled up with the same hash value.
5505 struct ocfs2_xattr_bucket *bucket, ocfs2_check_xattr_bucket_collision()
5508 struct ocfs2_xattr_header *xh = bucket_xh(bucket); ocfs2_check_xattr_bucket_collision()
5516 mlog(ML_ERROR, "Too much hash collision in xattr bucket %llu, " ocfs2_check_xattr_bucket_collision()
5518 (unsigned long long)bucket_blkno(bucket), ocfs2_check_xattr_bucket_collision()
5527 * Try to set the entry in the current bucket. If we fail, the caller
5528 * will handle getting us another bucket.
5540 ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket, ocfs2_xattr_set_entry_bucket()
5552 /* Ok, we need space. Let's try defragmenting the bucket. */ ocfs2_xattr_set_entry_bucket()
5554 xs->bucket); ocfs2_xattr_set_entry_bucket()
5590 /* Ack, need more space. Let's try to get another bucket! */ ocfs2_xattr_set_entry_index_block()
5595 * one bucket's worth, so check it here whether we need to ocfs2_xattr_set_entry_index_block()
5596 * add a new bucket for the insert. ocfs2_xattr_set_entry_index_block()
5599 xs->bucket, ocfs2_xattr_set_entry_index_block()
5608 xs->bucket, ocfs2_xattr_set_entry_index_block()
5617 * xs->bucket if it moved, but it will not have updated ocfs2_xattr_set_entry_index_block()
5622 ocfs2_xattr_bucket_relse(xs->bucket); ocfs2_xattr_set_entry_index_block()
5630 /* Ok, we have a new bucket, let's try again */ ocfs2_xattr_set_entry_index_block()
5640 struct ocfs2_xattr_bucket *bucket, ocfs2_delete_xattr_in_bucket()
5644 struct ocfs2_xattr_header *xh = bucket_xh(bucket); ocfs2_delete_xattr_in_bucket()
5662 ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, ocfs2_delete_xattr_in_bucket()
5682 ret = ocfs2_xattr_bucket_value_truncate(inode, bucket, ocfs2_delete_xattr_in_bucket()
5704 * Whenever we modify a xattr value root in the bucket(e.g, CoW
5706 * the metaecc for the whole bucket. So it is done here.
5716 struct ocfs2_xattr_bucket *bucket = ocfs2_xattr_bucket_post_refcount() local
5719 ret = ocfs2_xattr_bucket_journal_access(handle, bucket, ocfs2_xattr_bucket_post_refcount()
5726 ocfs2_xattr_bucket_journal_dirty(handle, bucket); ocfs2_xattr_bucket_post_refcount()
5762 struct ocfs2_xattr_bucket *bucket = NULL; ocfs2_prepare_refcount_xattr() local
5785 bucket_xh(xbs->bucket), ocfs2_prepare_refcount_xattr()
5792 base = bucket_block(xbs->bucket, block_off); ocfs2_prepare_refcount_xattr()
5793 vb.vb_bh = xbs->bucket->bu_bhs[block_off]; ocfs2_prepare_refcount_xattr()
5798 bucket = xbs->bucket; ocfs2_prepare_refcount_xattr()
5799 refcount.credits = bucket->bu_blocks; ocfs2_prepare_refcount_xattr()
5800 refcount.para = bucket; ocfs2_prepare_refcount_xattr()
5987 struct ocfs2_xattr_bucket *bucket, ocfs2_get_xattr_tree_value_root()
5993 struct ocfs2_xattr_header *xh = bucket_xh(bucket); ocfs2_get_xattr_tree_value_root()
5998 bucket_xh(bucket), ocfs2_get_xattr_tree_value_root()
6007 base = bucket_block(bucket, block_off); ocfs2_get_xattr_tree_value_root()
6013 *bh = bucket->bu_bhs[block_off]; ocfs2_get_xattr_tree_value_root()
6019 * For a given xattr bucket, refcount all the entries which
6023 struct ocfs2_xattr_bucket *bucket, ocfs2_xattr_bucket_value_refcount()
6031 (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data; ocfs2_xattr_bucket_value_refcount()
6037 .credits = bucket->bu_blocks, ocfs2_xattr_bucket_value_refcount()
6038 .para = bucket, ocfs2_xattr_bucket_value_refcount()
6048 (unsigned long long)bucket_blkno(bucket), ocfs2_xattr_bucket_value_refcount()
6056 ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i, ocfs2_xattr_bucket_value_refcount()
6315 * It can be used for inode, block and bucket.
6664 * We have to handle the case that both old bucket and new bucket
6678 struct ocfs2_xattr_bucket *bucket; ocfs2_get_reflink_xattr_value_root() local
6681 bucket = args->old_bucket; ocfs2_get_reflink_xattr_value_root()
6683 bucket = args->new_bucket; ocfs2_get_reflink_xattr_value_root()
6685 return ocfs2_get_xattr_tree_value_root(sb, bucket, offset, ocfs2_get_reflink_xattr_value_root()
6703 struct ocfs2_xattr_bucket *bucket = ocfs2_value_tree_metas_in_bucket() local
6706 return ocfs2_get_xattr_tree_value_root(sb, bucket, offset, ocfs2_value_tree_metas_in_bucket()
6711 struct ocfs2_xattr_bucket *bucket, ocfs2_calc_value_tree_metas()
6717 (struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data; ocfs2_calc_value_tree_metas()
6719 /* Add the credits for this bucket first. */ ocfs2_calc_value_tree_metas()
6720 metas->credits += bucket->bu_blocks; ocfs2_calc_value_tree_metas()
6721 return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0], ocfs2_calc_value_tree_metas()
6725 bucket); ocfs2_calc_value_tree_metas()
6862 * bucket. ocfs2_reflink_xattr_bucket()
6887 * Re-access and dirty the bucket to calculate metaecc. ocfs2_reflink_xattr_bucket()
7036 * We will add bucket one by one, and refcount all the xattrs in the bucket
428 ocfs2_xattr_bucket_journal_access(handle_t *handle, struct ocfs2_xattr_bucket *bucket, int type) ocfs2_xattr_bucket_journal_access() argument
447 ocfs2_xattr_bucket_journal_dirty(handle_t *handle, struct ocfs2_xattr_bucket *bucket) ocfs2_xattr_bucket_journal_dirty() argument
2288 ocfs2_init_xattr_bucket_xa_loc(struct ocfs2_xa_loc *loc, struct ocfs2_xattr_bucket *bucket, struct ocfs2_xattr_entry *entry) ocfs2_init_xattr_bucket_xa_loc() argument
3729 ocfs2_find_xe_in_bucket(struct inode *inode, struct ocfs2_xattr_bucket *bucket, int name_index, const char *name, u32 name_hash, u16 *xe_index, int *found) ocfs2_find_xe_in_bucket() argument
4029 ocfs2_list_xattr_bucket(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para) ocfs2_list_xattr_bucket() argument
4170 ocfs2_cp_xattr_block_to_bucket(struct inode *inode, struct buffer_head *xb_bh, struct ocfs2_xattr_bucket *bucket) ocfs2_cp_xattr_block_to_bucket() argument
4371 ocfs2_defrag_xattr_bucket(struct inode *inode, handle_t *handle, struct ocfs2_xattr_bucket *bucket) ocfs2_defrag_xattr_bucket() argument
5340 ocfs2_xattr_bucket_value_truncate(struct inode *inode, struct ocfs2_xattr_bucket *bucket, int xe_off, int len, struct ocfs2_xattr_set_ctxt *ctxt) ocfs2_xattr_bucket_value_truncate() argument
5504 ocfs2_check_xattr_bucket_collision(struct inode *inode, struct ocfs2_xattr_bucket *bucket, const char *name) ocfs2_check_xattr_bucket_collision() argument
5639 ocfs2_delete_xattr_in_bucket(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para) ocfs2_delete_xattr_in_bucket() argument
5986 ocfs2_get_xattr_tree_value_root(struct super_block *sb, struct ocfs2_xattr_bucket *bucket, int offset, struct ocfs2_xattr_value_root **xv, struct buffer_head **bh) ocfs2_get_xattr_tree_value_root() argument
6022 ocfs2_xattr_bucket_value_refcount(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para) ocfs2_xattr_bucket_value_refcount() argument
6710 ocfs2_calc_value_tree_metas(struct inode *inode, struct ocfs2_xattr_bucket *bucket, void *para) ocfs2_calc_value_tree_metas() argument
xattr.h 74 * or inside an xattr bucket, which is the leaf of a tree rooted in an
ocfs2_fs.h 1026 * Note that it can be stored in inode, one block or one xattr bucket.
1032 xattr bucket). */
1053 length in this bucket. */
1057 bucket. */
/linux-4.4.14/drivers/md/bcache/
alloc.c 2 * Primary bucket allocation code
8 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
11 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
12 * bucket simply by incrementing its gen.
18 * When we invalidate a bucket, we have to write its new gen to disk and wait
32 * If we've got discards enabled, that happens when a bucket moves from the
45 * a bucket is in danger of wrapping around we simply skip invalidating it that
49 * bch_bucket_alloc() allocates a single bucket from a specific cache.
74 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) bch_inc_gen()
87 struct bucket *b; bch_rescale_priorities()
124 static inline bool can_inc_bucket_gen(struct bucket *b) can_inc_bucket_gen()
129 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) bch_can_invalidate_bucket()
139 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) __bch_invalidate_one_bucket()
152 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) bch_invalidate_one_bucket()
162 * bucket, and in order for that multiply to make sense we have to scale bucket
164 * Thus, we scale the bucket priorities so that the bucket with the smallest
180 struct bucket *b; invalidate_buckets_lru()
217 struct bucket *b; invalidate_buckets_fifo()
240 struct bucket *b; invalidate_buckets_random()
298 static int bch_allocator_push(struct cache *ca, long bucket) bch_allocator_push() argument
303 if (fifo_push(&ca->free[RESERVE_PRIO], bucket)) bch_allocator_push()
307 if (fifo_push(&ca->free[i], bucket)) bch_allocator_push()
322 * possibly issue discards to them, then we add the bucket to bch_allocator_thread()
326 long bucket; bch_allocator_thread() local
328 fifo_pop(&ca->free_inc, bucket); bch_allocator_thread()
333 bucket_to_sector(ca->set, bucket), bch_allocator_thread()
338 allocator_wait(ca, bch_allocator_push(ca, bucket)); bch_allocator_thread()
384 struct bucket *b; bch_bucket_alloc()
447 void __bch_bucket_free(struct cache *ca, struct bucket *b) __bch_bucket_free()
516 * write streams for better cache utilization: first we look for a bucket where
518 * we look for a bucket that was last used by the same task.
579 * We might have to allocate a new bucket, which we can't do with a bch_alloc_sectors()
582 * allocated bucket(s). bch_alloc_sectors()
603 * second time we call find_data_bucket(). If we allocated a bucket but bch_alloc_sectors()
624 * Move b to the end of the lru, and keep track of what this bucket was bch_alloc_sectors()
645 * into the btree, but if we're done with this bucket we just transfer bch_alloc_sectors()
bcache.h 41 * To do this, we first divide the cache device up into buckets. A bucket is the
45 * Each bucket has a 16 bit priority, and an 8 bit generation associated with
50 * The priority is used to implement an LRU. We reset a bucket's priority when
52 * of each bucket. It could be used to implement something more sophisticated,
57 * must match the gen of the bucket it points into. Thus, to reuse a bucket all
61 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
99 * accomplished by either by invalidating pointers (by incrementing a bucket's
109 * Our unit of allocation is a bucket, and we we can't arbitrarily allocate and
110 * free smaller than a bucket - so, that's how big our btree nodes are.
112 * (If buckets are really big we'll only use part of the bucket for a btree node
113 * - no less than 1/4th - but a bucket still contains no more than a single
115 * bucket's gen for deleting btree nodes when we rewrite/split a node.)
138 * We can't just invalidate any bucket - it might contain dirty data or
140 * later, leaving no valid pointers into that bucket in the index.
143 * It also counts how much valid data it each bucket currently contains, so that
148 * some threshold, it rewrites the btree node to avoid the bucket's generation
194 struct bucket { struct
204 * as multiple threads touch struct bucket without locking
207 BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
213 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
214 BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
419 struct bucket *buckets;
421 DECLARE_HEAP(struct bucket *, heap);
514 * full bucket
551 * When we free a btree node, we increment the gen of the bucket the
565 * rescale; when it hits 0 we rescale all the bucket priorities.
572 * priority of any bucket.
589 * The allocation code needs gc_mark in struct bucket to be correct, but
729 static inline struct bucket *PTR_BUCKET(struct cache_set *c, PTR_BUCKET()
823 * bucket_gc_gen() returns the difference between the bucket's current gen and
824 * the oldest gen of any pointer into that bucket in the btree (last_gc).
827 static inline uint8_t bucket_gc_gen(struct bucket *b) bucket_gc_gen()
862 uint8_t bch_inc_gen(struct cache *, struct bucket *);
865 bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
866 void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
868 void __bch_bucket_free(struct cache *, struct bucket *);
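The bcache.h commentary above centres on one invariant: each bucket carries an 8-bit gen, each pointer records the gen it was issued against, and bumping the bucket gen invalidates every outstanding pointer into it. A minimal sketch of that check follows; the structure and function names are chosen for illustration, not taken from bcache.

#include <stdint.h>
#include <stdio.h>

struct bkt {
	uint8_t gen;		/* bumped every time the bucket is invalidated/reused */
};

struct bptr {
	size_t bucket_nr;
	uint8_t gen;		/* bucket gen at the time this pointer was created */
};

static struct bkt buckets[4];

/* Non-zero means the bucket has been reused since the pointer was made. */
static uint8_t ptr_stale(const struct bptr *p)
{
	return (uint8_t)(buckets[p->bucket_nr].gen - p->gen);
}

/* Reuse a bucket by incrementing its gen; all old pointers into it go stale. */
static void invalidate_bucket(size_t nr)
{
	buckets[nr].gen++;	/* only 8 bits, so gc must keep pointer gens close */
}

int main(void)
{
	struct bptr p = { .bucket_nr = 2, .gen = buckets[2].gen };

	printf("stale before reuse: %u\n", ptr_stale(&p));
	invalidate_bucket(2);
	printf("stale after reuse:  %u\n", ptr_stale(&p));
	return 0;
}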
journal.h 59 * To do that we track, for each journal bucket, the sequence number of the
61 * don't need anything in that bucket anymore. From that we track the last
62 * journal bucket we still need; all this is tracked in struct journal_device
111 /* Number of blocks free in the bucket(s) we're currently writing to */
127 * For each journal bucket, contains the max sequence number of the
128 * journal writes it contains - so we know when a bucket can be reused.
132 /* Journal bucket we're currently writing to */
135 /* Last journal bucket that still contains an open journal entry */
138 /* Next journal bucket to be discarded */
extents.c 9 * bucket priority is increased on cache hit, and periodically all the buckets
53 size_t bucket = PTR_BUCKET_NR(c, k, i); __ptr_invalid() local
57 bucket < ca->sb.first_bucket || __ptr_invalid()
58 bucket >= ca->sb.nbuckets) __ptr_invalid()
74 size_t bucket = PTR_BUCKET_NR(c, k, i); bch_ptr_status() local
79 if (bucket < ca->sb.first_bucket) bch_ptr_status()
81 if (bucket >= ca->sb.nbuckets) bch_ptr_status()
136 printk(" bucket %zu", n); bch_bkey_dump()
175 struct bucket *g; btree_ptr_bad_expensive()
197 "inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu", btree_ptr_bad_expensive()
506 struct bucket *g = PTR_BUCKET(b->c, k, ptr); bch_extent_bad_expensive()
536 struct bucket *g; bch_extent_bad()
591 /* Keys with no pointers aren't restricted to one bucket and could bch_extent_merge()
movinggc.c 188 static bool bucket_cmp(struct bucket *l, struct bucket *r) bucket_cmp()
195 struct bucket *b; bucket_heap_top()
202 struct bucket *b; bch_moving_gc()
super.c 151 err = "Bad block/bucket size"; read_super()
181 err = "Invalid superblock: first bucket comes before end of super"; read_super()
468 * For each bucket, we store on disk its
483 * header points to the first bucket, the first bucket points to the second
484 * bucket, et cetera.
501 static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) prio_io() argument
508 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; prio_io()
524 struct bucket *b; bch_prio_write()
540 long bucket; bch_prio_write() local
556 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); bch_prio_write()
557 BUG_ON(bucket == -1); bch_prio_write()
560 prio_io(ca, bucket, REQ_WRITE); bch_prio_write()
563 ca->prio_buckets[i] = bucket; bch_prio_write()
564 atomic_dec_bug(&ca->buckets[bucket].pin); bch_prio_write()
587 static void prio_read(struct cache *ca, uint64_t bucket) prio_read() argument
591 struct bucket *b; prio_read()
598 ca->prio_buckets[bucket_nr] = bucket; prio_read()
599 ca->prio_last_buckets[bucket_nr] = bucket; prio_read()
602 prio_io(ca, bucket, READ_SYNC); prio_read()
610 bucket = p->next_bucket; prio_read()
1655 err = "cannot allocate new UUID bucket";
1809 struct bucket *b; cache_alloc()
1826 !(ca->buckets = vzalloc(sizeof(struct bucket) * cache_alloc()
journal.c 44 sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]); journal_read_bucket() local
55 bio->bi_iter.bi_sector = bucket + offset; journal_read_bucket()
68 * journal entries that overlap bucket boundaries; this means journal_read_bucket()
69 * the start of a bucket will always have a valid journal entry journal_read_bucket()
247 * the first time, it'll use the bucket after for_each_cache()
btree.c 9 * bucket priority is increased on cache hit, and periodically all the buckets
46 * On btree write error, mark bucket such that it won't be freed from the cache
74 * bucket to the next whole sector
275 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", bch_btree_node_read_done()
321 bch_cache_set_error(b->c, "io error reading bucket %zu", bch_btree_node_read()
1086 "Tried to allocate bucket that was in btree cache"); __bch_btree_node_alloc()
1176 struct bucket *g; __bch_btree_mark_key()
1232 struct bucket *b = PTR_BUCKET(c, k, i); bch_initial_mark_key()
1633 struct bucket *b; btree_gc_start()
1659 struct bucket *b; bch_btree_gc_finish()
1851 struct bucket *b; bch_initial_gc_finish()
sysfs.c 761 struct bucket *b;
request.c 487 * If the bucket was reused while our bio was in flight, we might have bch_cache_read_endio()
559 * The bucket we're reading from might be reused while our bio cache_lookup_fn()
bset.h 30 * collection needs to find them to ensure bucket gens don't wrap around -
/linux-4.4.14/include/trace/events/
bcache.h 67 __field(size_t, bucket )
71 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
74 TP_printk("bucket %zu", __entry->bucket)
245 __field(size_t, bucket )
251 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
256 TP_printk("bucket %zu", __entry->bucket)
347 __field(size_t, bucket )
352 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
356 TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
406 TP_PROTO(struct cache *ca, size_t bucket),
407 TP_ARGS(ca, bucket),
417 __entry->offset = bucket << ca->set->bucket_bits;
418 __entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]);
427 TP_PROTO(struct cache *ca, size_t bucket),
428 TP_ARGS(ca, bucket),
437 __entry->offset = bucket << ca->set->bucket_bits;
/linux-4.4.14/arch/mips/netlogic/xlr/
fmn.c 72 int bucket, rv; fmn_message_handler() local
81 /* 8 bkts per core, [24:31] each bit represents one bucket fmn_message_handler()
82 * Bit is Zero if bucket is not empty */ fmn_message_handler()
86 for (bucket = 0; bucket < 8; bucket++) { fmn_message_handler()
87 /* Continue on empty bucket */ fmn_message_handler()
88 if (bkt_status & (1 << bucket)) fmn_message_handler()
90 rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid, fmn_message_handler()
101 hndlr->action(bucket, src_stnid, size, code, fmn_message_handler()
133 /* Setup bucket sizes for the core. */ xlr_percpu_fmn_init()
145 * bucket. Program the credits this core has on the 128 possible xlr_percpu_fmn_init()
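The fmn_message_handler() hits above scan the eight per-core buckets through a status byte in bits [24:31], where a zero bit marks a non-empty bucket and a set bit lets the loop skip it. A small sketch of that scan; the status value is made up for the example rather than read from hardware.

#include <stdint.h>
#include <stdio.h>

static void scan_buckets(uint32_t status_reg)
{
	uint8_t bkt_status = (status_reg >> 24) & 0xff;	/* one bit per bucket */

	for (int bucket = 0; bucket < 8; bucket++) {
		if (bkt_status & (1u << bucket))
			continue;	/* bit set: bucket is empty, skip it */
		printf("bucket %d has pending messages\n", bucket);
	}
}

int main(void)
{
	scan_buckets(0xf5000000u);	/* buckets 1 and 3 non-empty in this example */
	return 0;
}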
fmn-config.c 106 * Configure bucket size and credits for a device. 'size' is the size of
152 /* Distributing cpu per bucket credits to devices */ setup_fmn_cc()
/linux-4.4.14/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c 29 unsigned int bucket; member in struct:ct_iter_state
38 for (st->bucket = 0; ct_get_first()
39 st->bucket < net->ct.htable_size; ct_get_first()
40 st->bucket++) { ct_get_first()
42 hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); ct_get_first()
57 if (likely(get_nulls_value(head) == st->bucket)) { ct_get_next()
58 if (++st->bucket >= net->ct.htable_size) ct_get_next()
62 hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); ct_get_next()
218 unsigned int bucket; member in struct:ct_expect_iter_state
227 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { ct_expect_get_first()
229 hlist_first_rcu(&net->ct.expect_hash[st->bucket])); ct_expect_get_first()
244 if (++st->bucket >= nf_ct_expect_hsize) ct_expect_get_next()
247 hlist_first_rcu(&net->ct.expect_hash[st->bucket])); ct_expect_get_next()
/linux-4.4.14/fs/nfs/
pnfs_nfs.c 63 * If this will make the bucket empty, it will need to put the lseg reference.
76 struct pnfs_commit_bucket *bucket; pnfs_generic_clear_request_commit() local
78 bucket = list_first_entry(&req->wb_list, pnfs_generic_clear_request_commit()
81 freeme = bucket->wlseg; pnfs_generic_clear_request_commit()
82 bucket->wlseg = NULL; pnfs_generic_clear_request_commit()
114 pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, pnfs_generic_scan_ds_commit_list() argument
118 struct list_head *src = &bucket->written; pnfs_generic_scan_ds_commit_list()
119 struct list_head *dst = &bucket->committing; pnfs_generic_scan_ds_commit_list()
127 if (bucket->clseg == NULL) pnfs_generic_scan_ds_commit_list()
128 bucket->clseg = pnfs_get_lseg(bucket->wlseg); pnfs_generic_scan_ds_commit_list()
130 pnfs_put_lseg_locked(bucket->wlseg); pnfs_generic_scan_ds_commit_list()
131 bucket->wlseg = NULL; pnfs_generic_scan_ds_commit_list()
184 struct pnfs_commit_bucket *bucket; pnfs_generic_retry_commit() local
191 bucket = &fl_cinfo->buckets[i]; pnfs_generic_retry_commit()
192 if (list_empty(&bucket->committing)) pnfs_generic_retry_commit()
194 freeme = bucket->clseg; pnfs_generic_retry_commit()
195 bucket->clseg = NULL; pnfs_generic_retry_commit()
196 list_splice_init(&bucket->committing, &pages); pnfs_generic_retry_commit()
210 struct pnfs_commit_bucket *bucket; pnfs_generic_alloc_ds_commits() local
216 bucket = fl_cinfo->buckets; pnfs_generic_alloc_ds_commits()
217 for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) { pnfs_generic_alloc_ds_commits()
218 if (list_empty(&bucket->committing)) pnfs_generic_alloc_ds_commits()
238 struct pnfs_commit_bucket *bucket; pnfs_fetch_commit_bucket_list() local
240 bucket = &cinfo->ds->buckets[data->ds_commit_index]; pnfs_fetch_commit_bucket_list()
242 list_splice_init(&bucket->committing, pages); pnfs_fetch_commit_bucket_list()
243 data->lseg = bucket->clseg; pnfs_fetch_commit_bucket_list()
244 bucket->clseg = NULL; pnfs_fetch_commit_bucket_list()
direct.c 157 * @commit_idx - commit bucket index for the DS
/linux-4.4.14/net/9p/
error.c 196 int bucket; p9_error_init() local
199 for (bucket = 0; bucket < ERRHASHSZ; bucket++) p9_error_init()
200 INIT_HLIST_HEAD(&hash_errmap[bucket]); p9_error_init()
205 bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ; p9_error_init()
207 hlist_add_head(&c->list, &hash_errmap[bucket]); p9_error_init()
225 int bucket; p9_errstr2errno() local
229 bucket = jhash(errstr, len, 0) % ERRHASHSZ; p9_errstr2errno()
230 hlist_for_each_entry(c, &hash_errmap[bucket], list) { p9_errstr2errno()
/linux-4.4.14/drivers/md/persistent-data/
H A Ddm-transaction-manager.c106 unsigned bucket = dm_hash_block(b, DM_HASH_MASK); is_shadow() local
110 hlist_for_each_entry(si, tm->buckets + bucket, hlist) is_shadow()
126 unsigned bucket; insert_shadow() local
132 bucket = dm_hash_block(b, DM_HASH_MASK); insert_shadow()
134 hlist_add_head(&si->hlist, tm->buckets + bucket); insert_shadow()
143 struct hlist_head *bucket; wipe_shadow_table() local
148 bucket = tm->buckets + i; wipe_shadow_table()
149 hlist_for_each_entry_safe(si, tmp, bucket, hlist) wipe_shadow_table()
152 INIT_HLIST_HEAD(bucket); wipe_shadow_table()
/linux-4.4.14/drivers/staging/lustre/include/linux/libcfs/
libcfs_hash.h 99 union cfs_hash_lock hsb_lock; /**< bucket lock */
102 unsigned int hsb_index; /**< index of bucket */
103 int hsb_depmax; /**< max depth on bucket */
108 * cfs_hash bucket descriptor, it's normally in stack of caller
111 struct cfs_hash_bucket *bd_bucket; /**< address of bucket */
112 unsigned int bd_offset; /**< offset in bucket */
118 #define CFS_HASH_BKT_BITS 3 /**< default bits of bucket */
119 #define CFS_HASH_BITS_MAX 30 /**< max bits of bucket */
135 /** no bucket lock, use one spinlock to protect the whole hash */
137 /** rwlock to protect bucket */
139 /** spinlock to protect bucket */
177 * nolock, one-spinlock, rw-bucket-lock, spin-bucket-lock
194 * the global wrlock for each bucket.
208 * locations; additions must take care to only insert into the new bucket.
227 /** # of extra-bytes for bucket, for user saving extended attributes */
241 /** bits for each bucket */
262 /** id of the deepest bucket */
264 /** offset in the deepest bucket */
280 /** lock the hash bucket */
282 /** unlock the hash bucket */
315 /** get refcount of item, always called with holding bucket-lock */
319 /** release refcount of item, always called with holding bucket-lock */
334 /** number of hlist heads in a bucket */
353 /* no bucket lock, one single lock to protect the hash-table */ cfs_hash_with_no_bktlock()
360 /* rwlock to protect hash bucket */ cfs_hash_with_rw_bktlock()
367 /* spinlock to protect hash bucket */ cfs_hash_with_spin_bktlock()
549 * operations on cfs_hash bucket (bd: bucket descriptor),
653 * operations on cfs_hash bucket (bd: bucket descriptor),
756 /* Validate hnode is in the correct bucket */
860 /** iterate over all hlist of bucket @bd */
/linux-4.4.14/lib/
H A Ddma-debug.c252 * Request exclusive access to a hash bucket for a given dma_debug_entry.
266 * Give up exclusive access to the hash bucket
268 static void put_hash_bucket(struct hash_bucket *bucket, put_hash_bucket() argument
273 spin_unlock_irqrestore(&bucket->lock, __flags); put_hash_bucket()
296 * Search a given entry in the hash bucket list
298 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, __hash_bucket_find() argument
305 list_for_each_entry(entry, &bucket->list, list) { __hash_bucket_find()
348 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, bucket_find_exact() argument
351 return __hash_bucket_find(bucket, ref, exact_match); bucket_find_exact()
354 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, bucket_find_contain() argument
364 entry = __hash_bucket_find(*bucket, ref, containing_match); bucket_find_contain()
370 * Nothing found, go back a hash bucket bucket_find_contain()
372 put_hash_bucket(*bucket, flags); bucket_find_contain()
375 *bucket = get_hash_bucket(&index, flags); bucket_find_contain()
382 * Add an entry to a hash bucket
384 static void hash_bucket_add(struct hash_bucket *bucket, hash_bucket_add() argument
387 list_add_tail(&entry->list, &bucket->list); hash_bucket_add()
391 * Remove entry from a hash bucket list
411 struct hash_bucket *bucket = &dma_entry_hash[idx]; debug_dma_dump_mappings() local
415 spin_lock_irqsave(&bucket->lock, flags); debug_dma_dump_mappings()
417 list_for_each_entry(entry, &bucket->list, list) { debug_dma_dump_mappings()
429 spin_unlock_irqrestore(&bucket->lock, flags); debug_dma_dump_mappings()
613 struct hash_bucket *bucket; add_dma_entry() local
617 bucket = get_hash_bucket(entry, &flags); add_dma_entry()
618 hash_bucket_add(bucket, entry); add_dma_entry()
619 put_hash_bucket(bucket, &flags); add_dma_entry()
1077 struct hash_bucket *bucket; check_unmap() local
1080 bucket = get_hash_bucket(ref, &flags); check_unmap()
1081 entry = bucket_find_exact(bucket, ref); check_unmap()
1085 put_hash_bucket(bucket, &flags); check_unmap()
1162 put_hash_bucket(bucket, &flags); check_unmap()
1194 struct hash_bucket *bucket; check_sync() local
1197 bucket = get_hash_bucket(ref, &flags); check_sync()
1199 entry = bucket_find_contain(&bucket, ref, &flags); check_sync()
1261 put_hash_bucket(bucket, &flags); check_sync()
1307 struct hash_bucket *bucket; debug_dma_mapping_error() local
1315 bucket = get_hash_bucket(&ref, &flags); debug_dma_mapping_error()
1317 list_for_each_entry(entry, &bucket->list, list) { debug_dma_mapping_error()
1337 put_hash_bucket(bucket, &flags); debug_dma_mapping_error()
1401 struct hash_bucket *bucket; get_nr_mapped_entries() local
1405 bucket = get_hash_bucket(ref, &flags); get_nr_mapped_entries()
1406 entry = bucket_find_exact(bucket, ref); get_nr_mapped_entries()
1411 put_hash_bucket(bucket, &flags); get_nr_mapped_entries()
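
dma-debug.c above pairs every hash bucket with its own spinlock: get_hash_bucket() hashes the reference, locks that bucket and returns it, and put_hash_bucket() drops the lock again, so operations on different buckets never contend. A hedged pthread sketch of the same get/put discipline follows (simplified signatures, not the kernel helpers):

#include <pthread.h>
#include <stddef.h>

#define HASH_SIZE 64                      /* number of buckets */

struct entry {
    unsigned long key;
    struct entry *next;
};

struct hash_bucket {
    struct entry *list;                   /* chain of entries */
    pthread_mutex_t lock;                 /* protects only this chain */
};

static struct hash_bucket table[HASH_SIZE];

static void table_init(void)
{
    for (int i = 0; i < HASH_SIZE; i++) {
        table[i].list = NULL;
        pthread_mutex_init(&table[i].lock, NULL);
    }
}

/* Hash the key, lock that bucket and hand it back to the caller. */
static struct hash_bucket *get_hash_bucket(unsigned long key)
{
    struct hash_bucket *bucket = &table[key % HASH_SIZE];

    pthread_mutex_lock(&bucket->lock);
    return bucket;
}

/* Give up exclusive access to the bucket again. */
static void put_hash_bucket(struct hash_bucket *bucket)
{
    pthread_mutex_unlock(&bucket->lock);
}

/* Typical use: add an entry while holding only its bucket's lock. */
static void hash_add(struct entry *e)
{
    struct hash_bucket *bucket = get_hash_bucket(e->key);

    e->next = bucket->list;
    bucket->list = e;
    put_hash_bucket(bucket);
}

int main(void)
{
    static struct entry e = { .key = 42 };

    table_init();
    hash_add(&e);
    return 0;
}
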
H A Drhashtable.c76 /* Never allocate more than 0.5 locks per bucket */ alloc_bucket_locks()
220 /* Protect future_tbl using the first bucket lock. */ rhashtable_rehash_attach()
277 * A secondary bucket array is allocated and the hash entries are migrated.
286 * bucket locks or concurrent RCU protected lookups and traversals.
322 * bucket locks or concurrent RCU protected lookups and traversals.
815 * must occur in a compatible manner. Then frees the bucket array.
H A Ddebugobjects.c111 * Lookup an object in the hash bucket.
241 * for freed objects simply by checking the affected bucket.
/linux-4.4.14/include/linux/
H A Dhashtable.h117 * @bkt: integer to use as bucket loop cursor
129 * @bkt: integer to use as bucket loop cursor
142 * @bkt: integer to use as bucket loop cursor
154 * same bucket
165 * same bucket in an rcu enabled hashtable
178 * to the same bucket in an rcu enabled hashtable
193 * same bucket safe against removals
H A Drhashtable.h40 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
59 * @rehash: Current bucket being rehashed
111 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
305 /* The bucket lock is selected based on the hash and protects mutations
308 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
310 * entries which link to the same bucket of the old table during resizing.
311 * This allows to simplify the locking as locking the bucket in both
314 * IMPORTANT: When holding the bucket lock of both the old and new table
315 * during expansions and shrinking, the old bucket lock must always be
380 * @hash: the hash value / bucket index
391 * @hash: the hash value / bucket index
402 * @hash: the hash value / bucket index
415 * @hash: the hash value / bucket index
428 * @hash: the hash value / bucket index
448 * @hash: the hash value / bucket index
464 * @hash: the hash value / bucket index
479 * @hash: the hash value / bucket index
497 * @hash: the hash value / bucket index
523 * Computes the hash value for the key and traverses the bucket chain looking
587 * the hashed bucket that is yet to be rehashed. __rhashtable_insert_fast()
663 * Will take a per bucket spinlock to protect against mutual mutations
664 * on the same bucket. Multiple insertions may occur in parallel unless
665 * they map to the same bucket lock.
686 * Locks down the bucket chain in both the old and new table if a resize
721 * Locks down the bucket chain in both the old and new table if a resize
784 * walk the bucket chain upon removal. The removal operation is thus
803 /* Because we have already taken (and released) the bucket rhashtable_remove_fast()
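
The rhashtable.h comments above describe striped bucket locks: at most tbl->size/2 locks are allocated, the lock for a bucket is selected by masking the hash, and during a resize the old table's bucket lock is always taken before the new table's so that chains which may span both tables are covered in a fixed order. The fragment below is a rough illustration of that selection and ordering, not the rhashtable API (names and fields invented):

#include <pthread.h>

struct bucket_table {
    unsigned int size;        /* number of buckets, a power of two */
    unsigned int locks_mask;  /* nr_locks - 1, with nr_locks <= size / 2 */
    pthread_mutex_t *locks;   /* striped locks, each shared by several buckets */
};

/* Pick the lock covering the bucket this hash maps to. */
pthread_mutex_t *bucket_lock(struct bucket_table *tbl, unsigned int hash)
{
    return &tbl->locks[hash & tbl->locks_mask];
}

/*
 * Resize path: lock the old table's bucket first, then the new table's,
 * skipping the second lock when striping hands back the same mutex.
 */
void lock_old_and_new(struct bucket_table *old_tbl,
                      struct bucket_table *new_tbl, unsigned int hash)
{
    pthread_mutex_t *ol = bucket_lock(old_tbl, hash);
    pthread_mutex_t *nl = bucket_lock(new_tbl, hash);

    pthread_mutex_lock(ol);
    if (nl != ol)
        pthread_mutex_lock(nl);
}

Because several buckets share one mutex, the second lock may turn out to be the same object, which the sketch checks for before locking twice.
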
/linux-4.4.14/net/atm/
H A Dproc.c69 int bucket; member in struct:vcc_state
79 static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l) __vcc_walk() argument
84 for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) { __vcc_walk()
85 struct hlist_head *head = &vcc_hash[*bucket]; __vcc_walk()
99 if (!sk && ++*bucket < VCC_HTABLE_SIZE) { __vcc_walk()
100 sk = sk_head(&vcc_hash[*bucket]); __vcc_walk()
111 return __vcc_walk(&state->sk, state->family, &state->bucket, l) ? vcc_walk()
/linux-4.4.14/include/linux/crush/
H A Dcrush.h96 * A bucket is a named container of other items (either devices or
97 * other buckets). Items within a bucket are chosen using one of a
138 * cached random permutation: used for uniform bucket and for
139 * the linear search fallback for the other bucket types.
216 * allowed bucket algs is a bitmask, here the bit positions
220 * minimize confusion (bucket type values start at 1).
/linux-4.4.14/fs/omfs/
H A Ddir.c21 * Finds the bucket for a given name and reads the containing block;
28 int bucket = omfs_hash(name, namelen, nbuckets); omfs_get_bucket() local
30 *ofs = OMFS_DIR_START + bucket * 8; omfs_get_bucket()
122 /* just prepend to head of queue in proper bucket */ omfs_add_link()
169 /* delete the proper node in the bucket's linked list */ omfs_delete_entry()
333 /* follow chain in this bucket */ omfs_fill_chain()
423 /* high 12 bits store bucket + 1 and low 20 bits store hash index */ omfs_readdir()
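
omfs_readdir() above encodes its resume position in a single offset: the high 12 bits hold bucket + 1 and the low 20 bits hold the index within the bucket's chain. A tiny sketch of packing and unpacking such a cookie, with the field widths taken from the comment and hypothetical helper names:

#include <stdio.h>
#include <stdint.h>

/* high 12 bits: bucket + 1, low 20 bits: index within the bucket chain */
static uint32_t dir_cookie_pack(uint32_t bucket, uint32_t index)
{
    return ((bucket + 1) << 20) | (index & 0xfffff);
}

static void dir_cookie_unpack(uint32_t cookie, uint32_t *bucket, uint32_t *index)
{
    *bucket = (cookie >> 20) - 1;
    *index  = cookie & 0xfffff;
}

int main(void)
{
    uint32_t b, i;

    dir_cookie_unpack(dir_cookie_pack(7, 1234), &b, &i);
    printf("bucket=%u index=%u\n", b, i);   /* bucket=7 index=1234 */
    return 0;
}
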
H A Domfs_fs.h60 __be64 i_sibling; /* next inode in hash bucket */
/linux-4.4.14/drivers/cpuidle/governors/
H A Dmenu.c127 unsigned int bucket; member in struct:menu_device
144 int bucket = 0; which_bucket() local
153 bucket = BUCKETS/2; which_bucket()
156 return bucket; which_bucket()
158 return bucket + 1; which_bucket()
160 return bucket + 2; which_bucket()
162 return bucket + 3; which_bucket()
164 return bucket + 4; which_bucket()
165 return bucket + 5; which_bucket()
307 data->bucket = which_bucket(data->next_timer_us, nr_iowaiters); menu_select()
315 data->correction_factor[data->bucket], menu_select()
415 new_factor = data->correction_factor[data->bucket]; menu_update()
436 data->correction_factor[data->bucket] = new_factor; menu_update()
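
which_bucket() above maps the predicted sleep length onto a small number of buckets so that a separate correction factor can be tracked per bucket, with an extra offset when I/O waiters suggest shorter sleeps. Below is a simplified classifier in the same spirit; the decade thresholds mirror the structure of the excerpt but the exact values are illustrative, not copied from menu.c:

#include <stdio.h>

#define BUCKETS 12

/* Map an expected idle duration (microseconds) to a bucket index. */
static int which_bucket(unsigned int duration_us, unsigned int nr_iowaiters)
{
    int bucket = 0;

    /* pending I/O makes short sleeps likelier, so start half-way up */
    if (nr_iowaiters)
        bucket = BUCKETS / 2;

    if (duration_us < 10)
        return bucket;
    if (duration_us < 100)
        return bucket + 1;
    if (duration_us < 1000)
        return bucket + 2;
    if (duration_us < 10000)
        return bucket + 3;
    if (duration_us < 100000)
        return bucket + 4;
    return bucket + 5;
}

int main(void)
{
    printf("500us, no iowait -> bucket %d\n", which_bucket(500, 0));
    printf("500us, iowaiters -> bucket %d\n", which_bucket(500, 1));
    return 0;
}

menu_select()/menu_update() then index correction_factor[] with this value, so the predictor learns separately for each magnitude of expected sleep.
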
/linux-4.4.14/arch/tile/include/gxio/
H A Dmpipe.h80 * used to hold the packet, and which bucket will be used by the load
83 * The rules by which the buffer stack and bucket are chosen can be
85 * specify multiple rules, each one specifying a bucket range, and a
97 * the bucket number indicated by the classification program. In
98 * general, the bucket number is based on some number of low bits of
100 * hashing use a single bucket). Each load balancer bucket keeps a
101 * record of the NotifRing to which packets directed to that bucket
102 * are currently being delivered. Based on the bucket's load
107 * associated with the bucket.
634 * makes decisions based on both bucket and NotifGroup state, most
660 * @param first Index of first bucket if ::GXIO_MPIPE_ALLOC_FIXED flag is set,
663 * @return Index of first allocated buffer bucket, or
677 * load balancer bucket. Based on that bucket's load balancing mode,
682 /* All packets for a bucket go to the same NotifRing unless the
684 * the bucket reference count ever reaches zero, a new NotifRing may
690 /* All packets for a bucket always go to the same NotifRing.
695 /* All packets for a bucket go to the least full NotifRing in the
701 /* All packets for a bucket go to the same NotifRing unless the
702 * NotifRing gets full, at which point the bucket starts using the
709 /* All packets for a bucket go to the same NotifRing unless the
711 * bucket starts using the least full NotifRing in the group. If
720 /* Copy a set of bucket initialization values into the mPIPE
722 * bucket and NotifGroup state, most applications should use
724 * function to configure a single bucket.
727 * @param bucket Bucket index to be initialized.
732 unsigned int bucket,
740 * Second, each bucket is initialized with the mode and group, and a
743 * Normally, the classifier picks a bucket, and then the load balancer
744 * picks a ring, based on the bucket's mode, group, and current ring,
745 * possibly updating the bucket's ring.
751 * @param bucket The first bucket.
764 unsigned int bucket,
769 /* Return credits to a NotifRing and/or bucket.
773 * @param bucket The bucket, or -1.
777 int ring, int bucket, unsigned int count) gxio_mpipe_credit()
792 offset.bucket = bucket; gxio_mpipe_credit()
794 offset.bucket_enable = (bucket >= 0); gxio_mpipe_credit()
909 * (based on the packet size) and a bucket (based on the flow hash).
965 * @param bucket First load balancer bucket to which packets will be
978 unsigned int bucket,
1178 /* Release the ring and bucket for an old entry in an iqueue.
1182 * Releasing the bucket allows flows using the bucket to be moved to a
1214 * Note that if you are using a single bucket, and you are handling
1217 * "gxio_mpipe_credit(iqueue->context, iqueue->ring, bucket, N)".
1222 * avoid incorrectly crediting the (unused) bucket.
776 gxio_mpipe_credit(gxio_mpipe_context_t *context, int ring, int bucket, unsigned int count) gxio_mpipe_credit() argument
H A Diorpc_mpipe.h96 int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
/linux-4.4.14/fs/
H A Dmbcache.c583 unsigned int bucket; mb_cache_entry_insert() local
590 bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), mb_cache_entry_insert()
592 block_hash_p = &cache->c_block_hash[bucket]; mb_cache_entry_insert()
609 bucket = hash_long(key, cache->c_bucket_bits);
610 index_hash_p = &cache->c_index_hash[bucket];
664 unsigned int bucket; mb_cache_entry_get() local
669 bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), mb_cache_entry_get()
671 block_hash_p = &cache->c_block_hash[bucket]; mb_cache_entry_get()
789 unsigned int bucket = hash_long(key, cache->c_bucket_bits); mb_cache_entry_find_first() local
794 index_hash_p = &cache->c_index_hash[bucket]; mb_cache_entry_find_first()
828 unsigned int bucket = hash_long(key, cache->c_bucket_bits); mb_cache_entry_find_next() local
833 index_hash_p = &cache->c_index_hash[bucket]; mb_cache_entry_find_next()
H A Dseq_file.c1004 struct hlist_head *bucket = per_cpu_ptr(head, *cpu); seq_hlist_next_percpu() local
1006 if (!hlist_empty(bucket)) seq_hlist_next_percpu()
1007 return bucket->first; seq_hlist_next_percpu()
/linux-4.4.14/net/netfilter/ipvs/
H A Dip_vs_lblc.c107 struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ member in struct:ip_vs_lblc_table
175 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); ip_vs_lblc_hash()
188 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) ip_vs_lblc_get()
242 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { ip_vs_lblc_flush()
271 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { ip_vs_lblc_full_check()
327 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { ip_vs_lblc_check_expire()
366 INIT_HLIST_HEAD(&tbl->bucket[i]); ip_vs_lblc_init_svc()
H A Dip_vs_lblcr.c277 struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ member in struct:ip_vs_lblcr_table
338 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); ip_vs_lblcr_hash()
351 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) ip_vs_lblcr_get()
408 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { ip_vs_lblcr_flush()
436 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { ip_vs_lblcr_full_check()
491 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { ip_vs_lblcr_check_expire()
529 INIT_HLIST_HEAD(&tbl->bucket[i]); ip_vs_lblcr_init_svc()
H A Dip_vs_dh.c29 * Notes that servernode is a 256-bucket hash table that maps the hash
51 * IPVS DH bucket
H A Dip_vs_sh.c26 * Notes that servernode is a 256-bucket hash table that maps the hash
57 * IPVS SH bucket
H A Dip_vs_ctl.c1871 int bucket; member in struct:ip_vs_iter
1907 iter->bucket = idx; ip_vs_info_array()
1919 iter->bucket = idx; ip_vs_info_array()
1955 while (++iter->bucket < IP_VS_SVC_TAB_SIZE) { ip_vs_info_seq_next()
1957 &ip_vs_svc_table[iter->bucket], ip_vs_info_seq_next()
1964 iter->bucket = -1; ip_vs_info_seq_next()
1974 while (++iter->bucket < IP_VS_SVC_TAB_SIZE) { ip_vs_info_seq_next()
1976 &ip_vs_svc_fwm_table[iter->bucket], ip_vs_info_seq_next()
/linux-4.4.14/net/netfilter/
H A Dnf_conntrack_standalone.c51 unsigned int bucket; member in struct:ct_iter_state
61 for (st->bucket = 0; ct_get_first()
62 st->bucket < net->ct.htable_size; ct_get_first()
63 st->bucket++) { ct_get_first()
64 n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); ct_get_first()
79 if (likely(get_nulls_value(head) == st->bucket)) { ct_get_next()
80 if (++st->bucket >= net->ct.htable_size) ct_get_next()
85 &net->ct.hash[st->bucket])); ct_get_next()
H A Dnf_conntrack_expect.c466 unsigned int bucket; member in struct:ct_expect_iter_state
475 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { ct_expect_get_first()
476 n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); ct_expect_get_first()
491 if (++st->bucket >= nf_ct_expect_hsize) ct_expect_get_next()
493 head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); ct_expect_get_next()
H A Dxt_hashlimit.c42 MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
751 unsigned int *bucket; variable
757 bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
758 if (!bucket)
761 *bucket = *pos;
762 return bucket;
768 unsigned int *bucket = (unsigned int *)v; dl_seq_next() local
770 *pos = ++(*bucket); dl_seq_next()
775 return bucket; dl_seq_next()
782 unsigned int *bucket = (unsigned int *)v; variable
784 if (!IS_ERR(bucket))
785 kfree(bucket); variable
831 unsigned int *bucket = (unsigned int *)v; dl_seq_show() local
834 if (!hlist_empty(&htable->hash[*bucket])) { dl_seq_show()
835 hlist_for_each_entry(ent, &htable->hash[*bucket], node) dl_seq_show()
H A Dxt_recent.c479 unsigned int bucket; member in struct:recent_iter_state
492 for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++) __acquires()
493 list_for_each_entry(e, &t->iphash[st->bucket], list) __acquires()
506 while (head == &t->iphash[st->bucket]) { recent_seq_next()
507 if (++st->bucket >= ip_list_hash_size) recent_seq_next()
509 head = t->iphash[st->bucket].next; recent_seq_next()
H A Dnf_conntrack_core.c453 unsigned int bucket = hash_bucket(hash, net); ____nf_conntrack_find() local
460 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { ____nf_conntrack_find()
473 if (get_nulls_value(n) != bucket) { ____nf_conntrack_find()
1374 void *data, unsigned int *bucket) get_next_corpse()
1382 for (; *bucket < net->ct.htable_size; (*bucket)++) { get_next_corpse()
1383 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS]; get_next_corpse()
1386 if (*bucket < net->ct.htable_size) { get_next_corpse()
1387 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { get_next_corpse()
1423 unsigned int bucket = 0; nf_ct_iterate_cleanup() local
1425 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { nf_ct_iterate_cleanup()
1563 int i, bucket, rc; nf_conntrack_set_hashsize() local
1602 bucket = __hash_conntrack(&h->tuple, hashsize); nf_conntrack_set_hashsize()
1603 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); nf_conntrack_set_hashsize()
1373 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data, unsigned int *bucket) get_next_corpse() argument
H A Dnfnetlink_queue.c1262 unsigned int bucket; member in struct:iter_state
1276 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { get_first()
1277 if (!hlist_empty(&q->instance_table[st->bucket])) get_first()
1278 return q->instance_table[st->bucket].first; get_first()
1292 if (++st->bucket >= INSTANCE_BUCKETS) get_next()
1296 h = q->instance_table[st->bucket].first; get_next()
H A Dnfnetlink_log.c958 unsigned int bucket; member in struct:iter_state
969 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { get_first()
970 struct hlist_head *head = &log->instance_table[st->bucket]; get_first()
986 if (++st->bucket >= INSTANCE_BUCKETS) get_next()
990 head = &log->instance_table[st->bucket]; get_next()
/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/
H A Dhash.c58 * - a "bucket" is now a group of hlist_heads; the user can specify the bucket size
59 * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
66 * - support both spin_lock/rwlock for bucket:
69 * bucket is more reasonable for those frequently changed hash tables
169 /** no bucket lock, one spinlock to protect everything */
177 /** spin bucket lock, rehash is enabled */
185 /** rw bucket lock, rehash is enabled */
193 /** spin bucket lock, rehash is disabled */
201 /** rw bucket lock, rehash is disabled */
713 * NB: it's possible that several bds point to the same bucket but cfs_hash_multi_bd_lock()
777 if (i == 1) { /* only one bucket */
908 * Create or grow bucket memory. Return old_buckets if no allocation was
992 LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n", cfs_hash_dep_print()
1137 "hash %s bucket %u(%u) is not empty: %u items left\n", hlist_for_each_safe()
1304 * is required to ensure the correct hash bucket is locked since there
1305 * is no direct linkage from the item to the bucket. The object
1436 * . the bucket lock is held so the callback must never sleep.
1510 * The write lock being hold during loop for each bucket to avoid
1574 * one bucket to another bucket
1578 * hash bucket.
1625 } else { /* bucket changed? */ cfs_hash_for_each_relax()
1669 * For each hash bucket in the libcfs hash @hs call the passed callback
1734 * item and the private @data. During the callback the bucket lock
1855 /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */ cfs_hash_bd_for_each_hlist()
1860 /* Validate hnode is in the correct bucket. */ hlist_for_each_safe()
1863 * Delete from old hash bucket; move to new bucket. hlist_for_each_safe()
/linux-4.4.14/net/ipv4/
H A Dtcp_ipv4.c118 held not per host, but per port pair and TW bucket is used as state tcp_twsk_unique()
121 If TW bucket has been already destroyed we fall back to VJ's scheme tcp_twsk_unique()
1812 /* Clean up a referenced TCP bind bucket. */ tcp_v4_destroy_sock()
1832 * starting from bucket given in st->bucket; when st->bucket is zero the
1845 ilb = &tcp_hashinfo.listening_hash[st->bucket]; listening_get_next()
1851 ilb = &tcp_hashinfo.listening_hash[st->bucket]; listening_get_next()
1868 if (++st->bucket < INET_LHTABLE_SIZE) {
1869 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1884 st->bucket = 0; listening_get_idx()
1897 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain); empty_bucket()
1901 * Get first established socket starting from bucket given in st->bucket.
1902 * If st->bucket is zero, the very first socket in the hash is returned.
1911 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { established_get_first()
1914 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); established_get_first()
1921 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { established_get_first()
1952 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1953 ++st->bucket;
1962 st->bucket = 0; established_get_idx()
1997 if (st->bucket >= INET_LHTABLE_SIZE) tcp_seek_last_pos()
2005 st->bucket = 0; tcp_seek_last_pos()
2009 if (st->bucket > tcp_hashinfo.ehash_mask) tcp_seek_last_pos()
2034 st->bucket = 0; tcp_seq_start()
2058 st->bucket = 0; tcp_seq_next()
2080 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock); tcp_seq_stop()
2084 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); tcp_seq_stop()
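
The tcp_ipv4.c excerpts show the /proc iteration pattern: the iterator keeps the current bucket in st->bucket, locks it, walks its chain, and when the chain is exhausted unlocks, advances the bucket index and carries on, which also lets a dump stop and later resume from the saved bucket. Below is a compact single-threaded illustration of carrying the bucket index through such a walk; locking is omitted and the structures are invented for the sketch:

#include <stdio.h>
#include <stddef.h>

#define NBUCKETS 8

struct sock_entry {
    int id;
    struct sock_entry *next;
};

static struct sock_entry *hash[NBUCKETS];

/*
 * Return the first entry at or after *bucket, updating *bucket so the
 * caller can resume the walk later from the same position.
 */
static struct sock_entry *bucket_walk_first(unsigned int *bucket)
{
    for (; *bucket < NBUCKETS; (*bucket)++)
        if (hash[*bucket])
            return hash[*bucket];
    return NULL;
}

/* Advance within the current chain, spilling over into later buckets. */
static struct sock_entry *bucket_walk_next(struct sock_entry *cur,
                                           unsigned int *bucket)
{
    if (cur->next)
        return cur->next;
    (*bucket)++;
    return bucket_walk_first(bucket);
}

int main(void)
{
    static struct sock_entry a = { 1 }, b = { 2 }, c = { 3 };
    unsigned int bucket = 0;
    struct sock_entry *e;

    hash[2] = &a; a.next = &b;             /* two entries in bucket 2 */
    hash[5] = &c;                          /* one entry in bucket 5  */

    for (e = bucket_walk_first(&bucket); e; e = bucket_walk_next(e, &bucket))
        printf("bucket %u: id %d\n", bucket, e->id);
    return 0;
}
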
H A Dinet_timewait_sock.c54 /* Disassociate with bind bucket. */ inet_twsk_kill()
98 * Essentially we whip up a timewait bucket, copy the relevant info into it
244 * kill tw bucket after 3.5*RTO (it is important that this number __inet_twsk_schedule()
H A Dping.c1022 for (state->bucket = start; state->bucket < PING_HTABLE_SIZE; ping_get_first()
1023 ++state->bucket) { ping_get_first()
1027 hslot = &ping_table.hash[state->bucket]; ping_get_first()
1053 return ping_get_first(seq, state->bucket + 1); ping_get_next()
1070 state->bucket = 0; ping_seq_start()
1105 int bucket) ping_v4_format_sock()
1115 bucket, src, srcp, dest, destp, sp->sk_state, ping_v4_format_sock()
1135 ping_v4_format_sock(v, seq, state->bucket); ping_v4_seq_show()
1104 ping_v4_format_sock(struct sock *sp, struct seq_file *f, int bucket) ping_v4_format_sock() argument
H A Dinet_fragment.c336 * we acquired hash bucket lock. inet_frag_intern()
440 static const char msg[] = "inet_frag_find: Fragment hash bucket" inet_frag_maybe_warn_overflow()
H A Draw.c951 for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; raw_get_first()
952 ++state->bucket) { raw_get_first()
953 sk_for_each(sk, &state->h->ht[state->bucket]) raw_get_first()
972 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) { raw_get_next()
973 sk = sk_head(&state->h->ht[state->bucket]); raw_get_next()
1045 raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); raw_seq_show()
H A Dudp.c2308 for (state->bucket = start; state->bucket <= state->udp_table->mask; udp_get_first()
2309 ++state->bucket) { udp_get_first()
2311 struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; udp_get_first()
2340 if (state->bucket <= state->udp_table->mask) udp_get_next()
2341 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); udp_get_next()
2342 return udp_get_first(seq, state->bucket + 1); udp_get_next()
2360 state->bucket = MAX_UDP_PORTS; udp_seq_start()
2382 if (state->bucket <= state->udp_table->mask) udp_seq_stop()
2383 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); udp_seq_stop()
2430 int bucket) udp4_format_sock()
2440 bucket, src, srcp, dest, destp, sp->sk_state, udp4_format_sock()
2460 udp4_format_sock(v, seq, state->bucket); udp4_seq_show()
2429 udp4_format_sock(struct sock *sp, struct seq_file *f, int bucket) udp4_format_sock() argument
H A Dicmp.c49 * bucket filter (thanks to ANK). Make
249 * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
258 /* Check if token bucket is empty and cannot be refilled icmp_global_allow()
1213 * bucket ratemask defines which icmp types are ratelimited by
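
icmp.c above limits ICMP output with a token bucket that refills at sysctl_icmp_msgs_per_sec and refuses to send once the bucket is empty. Here is a minimal userspace token-bucket sketch in the same spirit, assuming a monotonic clock and made-up rate/burst numbers:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct token_bucket {
    double tokens;        /* current fill level */
    double rate;          /* tokens added per second */
    double burst;         /* bucket capacity */
    struct timespec last; /* last refill time */
};

static double ts_delta(const struct timespec *a, const struct timespec *b)
{
    return (a->tv_sec - b->tv_sec) + (a->tv_nsec - b->tv_nsec) / 1e9;
}

/* Refill based on elapsed time, then try to take one token. */
static bool token_bucket_allow(struct token_bucket *tb)
{
    struct timespec now;

    clock_gettime(CLOCK_MONOTONIC, &now);
    tb->tokens += ts_delta(&now, &tb->last) * tb->rate;
    if (tb->tokens > tb->burst)
        tb->tokens = tb->burst;          /* cap at the bucket size */
    tb->last = now;

    if (tb->tokens < 1.0)
        return false;                    /* bucket empty: rate limited */
    tb->tokens -= 1.0;
    return true;
}

int main(void)
{
    struct token_bucket tb = { .tokens = 5, .rate = 1000, .burst = 5 };

    clock_gettime(CLOCK_MONOTONIC, &tb.last);
    for (int i = 0; i < 8; i++)
        printf("msg %d: %s\n", i, token_bucket_allow(&tb) ? "sent" : "limited");
    return 0;
}
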
H A Dinet_hashtables.c58 * Allocate and initialize a new local port bind bucket.
149 * create a new bind bucket for the child here. */ __inet_inherit_port()
H A Dtcp_minisocks.c58 /* Send ACK. Note, we do not put the bucket, tcp_timewait_check_oow_rate_limit()
91 * NOTE. With recycling (and later with fin-wait-2) TW bucket
308 * The timewait bucket does not have the key DB from the tcp_time_wait()
H A Dinetpeer.c479 * too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
H A Dcipso_ipv4.c307 * first in the cache bucket:
375 * head of the cache bucket's list, if the cache bucket is out of room remove
/linux-4.4.14/drivers/md/
H A Ddm-cache-policy-cleaner.c145 struct hlist_head *bucket = &hash->table[h]; lookup_cache_entry() local
147 hlist_for_each_entry(cur, bucket, hlist) { hlist_for_each_entry()
149 /* Move upfront bucket for faster access. */ hlist_for_each_entry()
151 hlist_add_head(&cur->hlist, bucket); hlist_for_each_entry()
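
lookup_cache_entry() above re-links a hit to the head of its hash bucket ("Move upfront bucket for faster access"), a small self-adjusting-list heuristic that keeps recently used entries near the front of the chain. A singly linked sketch of the same move-to-front lookup follows; the kernel code uses hlist, and everything below is illustrative:

#include <stdio.h>

struct cache_entry {
    unsigned long key;
    struct cache_entry *next;
};

/* Look up key in one bucket chain; on a hit, move the entry to the front. */
static struct cache_entry *bucket_lookup_mtf(struct cache_entry **bucket,
                                             unsigned long key)
{
    struct cache_entry **pp = bucket;

    for (struct cache_entry *cur = *bucket; cur; pp = &cur->next, cur = cur->next) {
        if (cur->key != key)
            continue;
        *pp = cur->next;        /* unlink from its current position */
        cur->next = *bucket;    /* relink at the head of the bucket */
        *bucket = cur;
        return cur;
    }
    return NULL;
}

int main(void)
{
    static struct cache_entry a = { 1 }, b = { 2 }, c = { 3 };
    struct cache_entry *bucket = &a;

    a.next = &b; b.next = &c;
    bucket_lookup_mtf(&bucket, 3);                 /* hit on the tail entry */
    printf("head is now key %lu\n", bucket->key);  /* prints 3 */
    return 0;
}
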
H A Ddm-cache-policy-smq.c586 static struct entry *h_head(struct hash_table *ht, unsigned bucket) h_head() argument
588 return to_entry(ht->es, ht->buckets[bucket]); h_head()
596 static void __h_insert(struct hash_table *ht, unsigned bucket, struct entry *e) __h_insert() argument
598 e->hash_next = ht->buckets[bucket]; __h_insert()
599 ht->buckets[bucket] = to_index(ht->es, e); __h_insert()
634 * Also moves each entry to the front of the bucket.
661 * iterate the bucket to remove an item. h_remove()
H A Ddm-region-hash.c206 DMERR("unable to allocate region hash bucket memory"); dm_region_hash_create()
272 struct list_head *bucket = rh->buckets + rh_hash(rh, region); __rh_lookup() local
274 list_for_each_entry(reg, bucket, hash_list) __rh_lookup()
H A Ddm-cache-policy-mq.c505 struct hlist_head *bucket = mq->table + h; hash_lookup() local
508 hlist_for_each_entry(e, bucket, hlist) hlist_for_each_entry()
511 hlist_add_head(&e->hlist, bucket); hlist_for_each_entry()
H A Draid5.h90 * stripe is also (potentially) linked to a hash bucket in the hash
95 * The inactive_list, handle_list and hash bucket lists are all protected by the
/linux-4.4.14/net/core/
H A Dnet-procfs.c34 unsigned int bucket; dev_from_bucket() local
41 bucket = get_bucket(*pos) + 1; dev_from_bucket()
42 *pos = set_bucket_offset(bucket, 1); dev_from_bucket()
43 } while (bucket < NETDEV_HASHENTRIES); dev_from_bucket()
H A Dneighbour.c2501 int bucket = state->bucket; neigh_get_first() local
2504 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) { neigh_get_first()
2505 n = rcu_dereference_bh(nht->hash_buckets[bucket]); neigh_get_first()
2529 state->bucket = bucket; neigh_get_first()
2571 if (++state->bucket >= (1 << nht->hash_shift)) neigh_get_next()
2574 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]); neigh_get_next()
2603 int bucket = state->bucket; pneigh_get_first() local
2606 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) { pneigh_get_first()
2607 pn = tbl->phash_buckets[bucket]; pneigh_get_first()
2613 state->bucket = bucket; pneigh_get_first()
2631 if (++state->bucket > PNEIGH_HASHMASK) pneigh_get_next()
2633 pn = tbl->phash_buckets[state->bucket]; pneigh_get_next()
2680 state->bucket = 0; __acquires()
/linux-4.4.14/include/net/
H A Draw.h44 int bucket; member in struct:raw_iter_state
H A Dtransp_v6.h48 __u16 srcp, __u16 destp, int bucket);
H A Dping.h51 int bucket; member in struct:ping_iter_state
H A Dinet_frag.h36 * @list: hash bucket list
H A Dinet_hashtables.h61 * below. As we add sockets to a bind bucket list, we perform a
63 * As long as all sockets added to a bind bucket pass this test,
H A Dudp.h316 int bucket; member in struct:udp_iter_state
H A Dneighbour.h370 unsigned int bucket; member in struct:neigh_seq_state
/linux-4.4.14/arch/sparc/kernel/
H A Dirq_64.c206 struct ino_bucket bucket; member in struct:irq_handler_data
257 struct ino_bucket *bucket; cookie_exists() local
268 bucket = (struct ino_bucket *) __va(cookie); cookie_exists()
269 irq = bucket->__irq; cookie_exists()
278 struct ino_bucket *bucket; sysino_exists() local
281 bucket = &ivector_table[sysino]; sysino_exists()
282 irq = bucket_get_irq(__pa(bucket)); sysino_exists()
615 struct ino_bucket *bucket; build_irq() local
622 bucket = &ivector_table[ino]; build_irq()
623 irq = bucket_get_irq(__pa(bucket)); build_irq()
626 bucket_set_irq(__pa(bucket), irq); build_irq()
687 ihd->bucket.__irq = irq; cookie_assign()
688 cookie = ~__pa(&ihd->bucket); cookie_assign()
738 struct ino_bucket *bucket; sysino_set_bucket() local
743 bucket = &ivector_table[sysino]; sysino_set_bucket()
744 bucket_set_irq(__pa(bucket), irq); sysino_set_bucket()
H A Dprom_irqtrans.c60 /* Now build the IRQ bucket. */ psycho_irq_build()
241 /* Now build the IRQ bucket. */ sabre_irq_build()
390 /* Now build the IRQ bucket. */ schizo_irq_build()
520 /* Now build the IRQ bucket. */ fire_irq_build()
H A Dkprobes.c542 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
/linux-4.4.14/arch/mn10300/kernel/
H A Dprofile-low.S56 # increment the appropriate profile bucket
/linux-4.4.14/drivers/misc/vmw_vmci/
H A Dvmci_doorbell.c128 u32 bucket = VMCI_DOORBELL_HASH(idx); dbell_index_table_find() local
131 hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], dbell_index_table_find()
147 u32 bucket; dbell_index_table_add() local
195 bucket = VMCI_DOORBELL_HASH(entry->idx); dbell_index_table_add()
196 hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]); dbell_index_table_add()
359 u32 bucket = VMCI_DOORBELL_HASH(notify_idx); dbell_fire_entries() local
364 hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) { dbell_fire_entries()
/linux-4.4.14/arch/tile/gxio/
H A Dmpipe.c174 unsigned int bucket, gxio_mpipe_init_notif_group_and_buckets()
199 result = gxio_mpipe_init_bucket(context, bucket + i, gxio_mpipe_init_notif_group_and_buckets()
233 unsigned int bucket, unsigned int num_buckets, gxio_mpipe_rules_begin()
279 /* Save the bucket info. */ gxio_mpipe_rules_begin()
281 rule->bucket_first = bucket; gxio_mpipe_rules_begin()
170 gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context, unsigned int group, unsigned int ring, unsigned int num_rings, unsigned int bucket, unsigned int num_buckets, gxio_mpipe_bucket_mode_t mode) gxio_mpipe_init_notif_group_and_buckets() argument
232 gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules, unsigned int bucket, unsigned int num_buckets, gxio_mpipe_rules_stacks_t *stacks) gxio_mpipe_rules_begin() argument
H A Diorpc_mpipe.c243 unsigned int bucket; member in struct:init_bucket_param
247 int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket, gxio_mpipe_init_bucket() argument
253 params->bucket = bucket; gxio_mpipe_init_bucket()
/linux-4.4.14/arch/tile/include/arch/
H A Dmpipe.h43 uint_reg_t bucket : 13; member in struct:__anon2761::__anon2762
66 uint_reg_t bucket : 13;
348 /* NotifRing currently assigned to this bucket. */
352 /* Group associated with this bucket. */
354 /* Mode select for this bucket. */
/linux-4.4.14/fs/xfs/
H A Dxfs_fsops.c156 int bucket; xfs_growfs_data_private() local
280 for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) xfs_growfs_data_private()
281 agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); xfs_growfs_data_private()
317 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) xfs_growfs_data_private()
318 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); xfs_growfs_data_private()
H A Dxfs_log_recover.c1583 struct list_head *bucket; xlog_recover_buffer_pass1() local
1598 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno); list_for_each_entry()
1599 list_for_each_entry(bcp, bucket, bc_list) { list_for_each_entry()
1612 list_add_tail(&bcp->bc_list, bucket);
1630 struct list_head *bucket; xlog_peek_buffer_cancelled() local
1639 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno); list_for_each_entry()
1640 list_for_each_entry(bcp, bucket, bc_list) { list_for_each_entry()
3956 * in an agi unlinked inode hash bucket.
3962 int bucket) xlog_recover_clear_agi_bucket()
3980 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); xlog_recover_clear_agi_bucket()
3982 (sizeof(xfs_agino_t) * bucket); xlog_recover_clear_agi_bucket()
4003 int bucket) xlog_recover_process_one_iunlink()
4017 * Get the on disk inode to find the next inode in the bucket. xlog_recover_process_one_iunlink()
4043 * We can't read in the inode this bucket points to, or this inode xlog_recover_process_one_iunlink()
4044 * is messed up. Just ditch this bucket of inodes. We will lose xlog_recover_process_one_iunlink()
4048 * clear the inode pointer in the bucket. xlog_recover_process_one_iunlink()
4050 xlog_recover_clear_agi_bucket(mp, agno, bucket); xlog_recover_process_one_iunlink()
4075 int bucket; xlog_recover_process_iunlinks() local
4106 * initial unlinked bucket entries out of the buffer. We keep xlog_recover_process_iunlinks()
4113 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { xlog_recover_process_iunlinks()
4114 agino = be32_to_cpu(agi->agi_unlinked[bucket]); xlog_recover_process_iunlinks()
4117 agno, agino, bucket); xlog_recover_process_iunlinks()
3959 xlog_recover_clear_agi_bucket( xfs_mount_t *mp, xfs_agnumber_t agno, int bucket) xlog_recover_clear_agi_bucket() argument
3999 xlog_recover_process_one_iunlink( struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agino_t agino, int bucket) xlog_recover_process_one_iunlink() argument
/linux-4.4.14/net/mac80211/
H A Dmesh_pathtbl.c339 struct hlist_head *bucket; mpath_lookup() local
342 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; hlist_for_each_entry_rcu()
343 hlist_for_each_entry_rcu(node, bucket, list) { hlist_for_each_entry_rcu()
534 struct hlist_head *bucket; mesh_path_add() local
553 bucket = &tbl->hash_buckets[hash_idx]; mesh_path_add()
557 hlist_for_each_entry(node, bucket, list) { hlist_for_each_entry()
586 hlist_add_head_rcu(&new_node->list, bucket);
668 struct hlist_head *bucket; mpp_path_add() local
703 bucket = &tbl->hash_buckets[hash_idx]; mpp_path_add()
708 hlist_for_each_entry(node, bucket, list) { hlist_for_each_entry()
715 hlist_add_head_rcu(&new_node->list, bucket);
891 struct hlist_head *bucket; mesh_path_del() local
898 bucket = &tbl->hash_buckets[hash_idx]; mesh_path_del()
901 hlist_for_each_entry(node, bucket, list) { hlist_for_each_entry()
H A Dmesh.h92 * an mpath to a hash bucket on a path table.
132 * @hashwlock: array of locks to protect write operations, one per bucket
150 spinlock_t *hashwlock; /* One per bucket, for add/del */
189 struct list_head bucket[RMC_BUCKETS]; member in struct:mesh_rmc
H A Dmesh.c180 INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]); mesh_rmc_init()
194 list_for_each_entry_safe(p, n, &rmc->bucket[i], list) { mesh_rmc_free()
229 list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) { mesh_rmc_check()
247 list_add(&p->list, &rmc->bucket[idx]); mesh_rmc_check()
/linux-4.4.14/drivers/s390/scsi/
H A Dzfcp_reqlist.h19 * @list: Array of hashbuckets, each is a list of requests in this bucket
149 * are added here with list_add_tail at the end of the bucket lists
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/
H A Dl2t.h68 * pointer. Finally, each node is a bucket of a hash table, pointing to the
83 u16 hash; /* hash bucket the entry is on */
/linux-4.4.14/drivers/gpu/drm/i915/
H A Di915_gem_batch_pool.c107 /* Compute a power-of-two bucket, but throw everything greater than i915_gem_batch_pool_get()
108 * 16KiB into the same bucket: i.e. the the buckets hold objects of i915_gem_batch_pool_get()
H A Di915_cmd_parser.c626 * we mask a command from a batch it could hash to the wrong bucket due to
628 * commands may hash to the same bucket due to not including opcode bits that
629 * make the command unique. For now, we will risk hashing to the same bucket.
/linux-4.4.14/net/openvswitch/
H A Dflow_table.h75 u32 *bucket, u32 *idx);
H A Dvport.c109 struct hlist_head *bucket = hash_bucket(net, name); ovs_vport_locate() local
112 hlist_for_each_entry_rcu(vport, bucket, hash_node) ovs_vport_locate()
207 struct hlist_head *bucket; ovs_vport_add() local
218 bucket = hash_bucket(ovs_dp_get_net(vport->dp), ovs_vport_add()
220 hlist_add_head_rcu(&vport->hash_node, bucket); ovs_vport_add()
H A Dflow_table.c292 u32 *bucket, u32 *last) ovs_flow_tbl_dump_next()
300 while (*bucket < ti->n_buckets) { ovs_flow_tbl_dump_next()
302 head = flex_array_get(ti->buckets, *bucket); hlist_for_each_entry_rcu()
311 (*bucket)++;
291 ovs_flow_tbl_dump_next(struct table_instance *ti, u32 *bucket, u32 *last) ovs_flow_tbl_dump_next() argument
H A Ddatapath.c988 /* Put flow in bucket. */ ovs_flow_cmd_new()
1361 u32 bucket, obj; ovs_flow_cmd_dump() local
1363 bucket = cb->args[0]; ovs_flow_cmd_dump()
1365 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj); ovs_flow_cmd_dump()
1375 cb->args[0] = bucket; ovs_flow_cmd_dump()
2115 int bucket = cb->args[0], skip = cb->args[1]; ovs_vport_cmd_dump() local
2124 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { ovs_vport_cmd_dump()
/linux-4.4.14/Documentation/vDSO/
H A Dparse_vdso.c72 ELF(Word) *bucket, *chain;
177 vdso_info.bucket = &hash[2]; vdso_init_from_sysinfo_ehdr()
229 ELF(Word) chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket]; vdso_sym()
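
parse_vdso.c above resolves symbols through the classic SysV ELF hash section: bucket[elf_hash(name) % nbucket] gives the first candidate symbol index and chain[] threads the remaining symbols that landed in the same bucket. Below is a self-contained sketch of that bucket/chain lookup over plain arrays, with a faked symbol table of strings instead of real ELF structures:

#include <stdio.h>
#include <string.h>

/* Standard SysV ELF hash function, as used for .hash sections. */
static unsigned long elf_hash(const char *name)
{
    unsigned long h = 0, g;

    while (*name) {
        h = (h << 4) + (unsigned char)*name++;
        g = h & 0xf0000000ul;
        if (g)
            h ^= g >> 24;
        h &= ~g;
    }
    return h;
}

#define NBUCKET 3
#define NSYM    5

/* Fake "symbol table": names indexed by symbol number; symbol 0 is reserved. */
static const char *symname[NSYM] = {
    "", "gettimeofday", "clock_gettime", "time", "getcpu"
};
static unsigned int bucket[NBUCKET];   /* first symbol index per bucket */
static unsigned int chain[NSYM];       /* next symbol in the same bucket */

static void hash_build(void)
{
    for (unsigned int i = 1; i < NSYM; i++) {
        unsigned int b = elf_hash(symname[i]) % NBUCKET;

        chain[i] = bucket[b];          /* prepend to the bucket's chain */
        bucket[b] = i;
    }
}

static unsigned int hash_lookup(const char *name)
{
    for (unsigned int i = bucket[elf_hash(name) % NBUCKET]; i; i = chain[i])
        if (!strcmp(symname[i], name))
            return i;
    return 0;                          /* STN_UNDEF */
}

int main(void)
{
    hash_build();
    printf("clock_gettime -> symbol %u\n", hash_lookup("clock_gettime"));
    return 0;
}
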
/linux-4.4.14/security/selinux/
H A Dnetnode.c77 * the bucket number for the given IP address.
93 * the bucket number for the given IP address.
173 * this bucket to make sure it is within the specified bounds */ sel_netnode_insert()
H A Dnetport.c75 * This is the hashing function for the port table, it returns the bucket
120 * this bucket to make sure it is within the specified bounds */ sel_netport_insert()
H A Dnetif.c53 * bucket number for the given interface.
/linux-4.4.14/arch/hexagon/kernel/
H A Dptrace.c112 unsigned long bucket; genregs_set() local
142 INEXT(&bucket, cause); genregs_set()
143 INEXT(&bucket, badva); genregs_set()
/linux-4.4.14/net/ipv6/
H A Dping.c206 int bucket = ((struct ping_iter_state *) seq->private)->bucket; ping_v6_seq_show() local
210 ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); ping_v6_seq_show()
H A Dip6_flowlabel.c697 int bucket; member in struct:ip6fl_iter_state
708 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) { ip6fl_get_first()
709 for_each_fl_rcu(state->bucket, fl) { ip6fl_get_first()
730 if (++state->bucket <= FL_HASH_MASK) {
731 for_each_fl_rcu(state->bucket, fl) {
H A Ddatagram.c963 __u16 srcp, __u16 destp, int bucket) ip6_dgram_sock_seq_show()
972 bucket, ip6_dgram_sock_seq_show()
962 ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, __u16 destp, int bucket) ip6_dgram_sock_seq_show() argument
H A Dudp.c1472 int bucket = ((struct udp_iter_state *)seq->private)->bucket; udp6_seq_show() local
1476 ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); udp6_seq_show()
H A Daddrconf.c3749 int bucket; member in struct:if6_iter_state
3760 /* initial bucket if pos is 0 */ if6_get_first()
3762 state->bucket = 0; if6_get_first()
3766 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { if6_get_first()
3767 hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket], if6_get_first()
3780 /* prepare for next bucket */ if6_get_first()
3800 while (++state->bucket < IN6_ADDR_HSIZE) {
3803 &inet6_addr_lst[state->bucket], addr_lst) {
/linux-4.4.14/net/llc/
H A Dllc_proc.c67 static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket) laddr_hash_next() argument
72 while (++bucket < LLC_SK_LADDR_HASH_ENTRIES) laddr_hash_next()
73 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket]) laddr_hash_next()
/linux-4.4.14/drivers/staging/rdma/hfi1/
H A Dtrace.h331 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
332 TP_ARGS(qp, bucket),
336 __field(u32, bucket)
341 __entry->bucket = bucket;
344 "[%s] qpn 0x%x bucket %u",
347 __entry->bucket
352 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
353 TP_ARGS(qp, bucket));
356 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
357 TP_ARGS(qp, bucket));
/linux-4.4.14/security/keys/
H A Dkeyring.c63 unsigned bucket = 0; keyring_hash() local
66 bucket += (unsigned char)*desc; keyring_hash()
68 return bucket & (KEYRING_NAME_HASH_SIZE - 1); keyring_hash()
111 int bucket; keyring_publish_name() local
114 bucket = keyring_hash(keyring->description); keyring_publish_name()
118 if (!keyring_name_hash[bucket].next) keyring_publish_name()
119 INIT_LIST_HEAD(&keyring_name_hash[bucket]); keyring_publish_name()
122 &keyring_name_hash[bucket]); keyring_publish_name()
979 int bucket; find_keyring_by_name() local
984 bucket = keyring_hash(name); find_keyring_by_name()
988 if (keyring_name_hash[bucket].next) { find_keyring_by_name()
989 /* search this hash bucket for a keyring with a matching name find_keyring_by_name()
992 &keyring_name_hash[bucket], find_keyring_by_name()
/linux-4.4.14/drivers/gpu/drm/radeon/
H A Dradeon_cs.c37 /* This is based on the bucket sort with O(n) time complexity.
38 * An item with priority "i" is added to bucket[i]. The lists are then
42 struct list_head bucket[RADEON_CS_NUM_BUCKETS]; member in struct:radeon_cs_buckets
50 INIT_LIST_HEAD(&b->bucket[i]); radeon_cs_buckets_init()
61 list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]); radeon_cs_buckets_add()
71 list_splice(&b->bucket[i], out_list); radeon_cs_buckets_get_list()
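
radeon_cs.c above sorts buffer-list items by keeping one list per priority, appending each item to bucket[priority] and finally splicing the buckets together, which is O(n) for a small, bounded priority range (amdgpu_cs.c later in this listing does the same). A compact userspace version of that bucket sort, with invented structure names:

#include <stdio.h>

#define NUM_BUCKETS 4          /* priorities 0..3, 0 = most important */

struct item {
    int priority;
    const char *name;
    struct item *next;
};

struct buckets {
    struct item *head[NUM_BUCKETS];
    struct item *tail[NUM_BUCKETS];
};

static void buckets_add(struct buckets *b, struct item *it)
{
    int p = it->priority;

    if (p >= NUM_BUCKETS)
        p = NUM_BUCKETS - 1;   /* clamp out-of-range priorities */
    it->next = NULL;
    if (b->tail[p])
        b->tail[p]->next = it; /* append: keeps insertion order stable */
    else
        b->head[p] = it;
    b->tail[p] = it;
}

/* Concatenate the buckets in priority order into one sorted list. */
static struct item *buckets_get_list(struct buckets *b)
{
    struct item *out = NULL, **link = &out;

    for (int p = 0; p < NUM_BUCKETS; p++) {
        if (!b->head[p])
            continue;
        *link = b->head[p];
        link = &b->tail[p]->next;
    }
    return out;
}

int main(void)
{
    static struct item a = { 2, "blit" }, c = { 0, "fence" }, d = { 2, "copy" };
    static struct buckets b;
    struct item *it;

    buckets_add(&b, &a);
    buckets_add(&b, &c);
    buckets_add(&b, &d);
    for (it = buckets_get_list(&b); it; it = it->next)
        printf("%d %s\n", it->priority, it->name);   /* fence, blit, copy */
    return 0;
}
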
/linux-4.4.14/kernel/bpf/
H A Dhashtab.c190 /* key was found, get next key in the same bucket */ htab_map_get_next_key()
200 /* no more elements in this hash list, go to the next bucket */ htab_map_get_next_key()
209 /* pick first element in the bucket */ htab_map_get_next_key()
/linux-4.4.14/drivers/infiniband/core/
H A Dfmr_pool.c64 * The cache_node member is used to link the FMR into a cache bucket
119 struct hlist_head *bucket; ib_fmr_cache_lookup() local
125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); ib_fmr_cache_lookup()
127 hlist_for_each_entry(fmr, bucket, cache_node) ib_fmr_cache_lookup()
/linux-4.4.14/drivers/message/fusion/lsi/
H A Dmpi_lan.h24 * Changed transaction context usage to bucket/buffer.
/linux-4.4.14/kernel/
H A Dfutex.c80 * futex_wait(). This function computes the hash bucket and acquires
81 * the hash bucket lock. After that it reads the futex user space value
83 * it enqueues itself into the hash bucket, releases the hash bucket lock
87 * futex_wake(). This function computes the hash bucket and acquires the
88 * hash bucket lock. Then it looks for waiters on that futex in the hash
89 * bucket and wakes them.
115 * and the waker did not find the waiter in the hash bucket queue.
167 * address we always increment the waiters for the destination bucket before
211 * @lock_ptr: the hash bucket lock
259 * The base of the bucket array and its size are always used together
427 * The hash bucket spinlock must not be held. This is
650 * @hb: the hash bucket the futex_q's reside in
1067 * @hb: the pi futex hash bucket
1178 * The hash bucket lock must be held when this is called.
1254 * bucket lock, retry the operation. wake_futex_pi()
1481 * If key1 and key2 hash to the same bucket, no need to requeue_futex()
1529 * @hb1: the from futex hash bucket, must be locked by the caller
1530 * @hb2: the to futex hash bucket, must be locked by the caller
1844 * the requeue we moved futex_q's from the hash bucket at key1 to the futex_requeue()
1894 * @hb: The destination hash bucket
1977 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1995 * Must be called with hash bucket lock held and mm->sem held for non
2024 * here, because we need to drop the hash bucket lock to fixup_pi_state_owner()
2062 * To handle the page fault we need to drop the hash bucket fixup_pi_state_owner()
2067 * after reacquiring the hash bucket lock and before trying to fixup_pi_state_owner()
2124 * way back before we locked the hash bucket. fixup_owner()
2167 * @hb: the futex hash bucket, must be locked by the caller
2227 * Access the page AFTER the hash-bucket is locked. futex_wait_setup()
2235 * any cond. If we locked the hash-bucket after testing *uaddr, that futex_wait_setup()
2239 * On the other hand, we insert q and release the hash-bucket only futex_wait_setup()
2540 * bucket lock. futex_unlock_pi()
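
The futex.c comments above describe both sides of the protocol: the waiter hashes the futex address, takes that hash bucket's lock, re-reads the user-space value and only then queues itself and sleeps, while the waker hashes to the same bucket, takes the lock and wakes whatever is queued there. The sketch below is a heavily simplified user-space analogue with one mutex/condvar pair per bucket; it mimics only the bucketing and the value re-check under the bucket lock, none of the PI or requeue machinery, and every name in it is made up.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

#define NBUCKETS 16

struct wait_bucket {
    pthread_mutex_t lock;   /* the "hash bucket lock" */
    pthread_cond_t cond;    /* waiters queued on this bucket */
};

static struct wait_bucket table[NBUCKETS];

void wait_table_init(void)
{
    for (int i = 0; i < NBUCKETS; i++) {
        pthread_mutex_init(&table[i].lock, NULL);
        pthread_cond_init(&table[i].cond, NULL);
    }
}

static struct wait_bucket *hash_addr(const atomic_int *addr)
{
    /* crude pointer hash; the kernel hashes mm, address and offset */
    return &table[((uintptr_t)addr >> 4) % NBUCKETS];
}

/* Sleep until *addr no longer equals val; the check runs under the bucket lock. */
void wait_on(atomic_int *addr, int val)
{
    struct wait_bucket *b = hash_addr(addr);

    pthread_mutex_lock(&b->lock);
    while (atomic_load(addr) == val)    /* re-check to avoid lost wakeups */
        pthread_cond_wait(&b->cond, &b->lock);
    pthread_mutex_unlock(&b->lock);
}

/* Wake every waiter hashed to addr's bucket, after *addr has been changed. */
void wake_all(atomic_int *addr)
{
    struct wait_bucket *b = hash_addr(addr);

    pthread_mutex_lock(&b->lock);
    pthread_cond_broadcast(&b->cond);
    pthread_mutex_unlock(&b->lock);
}

A waker is expected to change *addr before calling wake_all(); the re-check loop under the bucket lock closes the lost-wakeup window the comments describe, and unrelated addresses that happen to share a bucket merely cause spurious wakeups that the loop absorbs.
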
/linux-4.4.14/arch/mips/include/asm/netlogic/xlr/
H A Dfmn.h314 static inline int nlm_fmn_receive(int bucket, int *size, int *code, int *stid, nlm_fmn_receive() argument
319 nlm_msgld(bucket); nlm_fmn_receive()
/linux-4.4.14/tools/power/x86/turbostat/
H A Dturbostat.c1338 * [7:0] -- Base value of number of active cores of bucket 1. dump_knl_turbo_ratio_limits()
1339 * [15:8] -- Base value of freq ratio of bucket 1. dump_knl_turbo_ratio_limits()
1340 * [20:16] -- +ve delta of number of active cores of bucket 2. dump_knl_turbo_ratio_limits()
1341 * i.e. active cores of bucket 2 = dump_knl_turbo_ratio_limits()
1342 * active cores of bucket 1 + delta dump_knl_turbo_ratio_limits()
1343 * [23:21] -- Negative delta of freq ratio of bucket 2. dump_knl_turbo_ratio_limits()
1344 * i.e. freq ratio of bucket 2 = dump_knl_turbo_ratio_limits()
1345 * freq ratio of bucket 1 - delta dump_knl_turbo_ratio_limits()
1346 * [28:24]-- +ve delta of number of active cores of bucket 3. dump_knl_turbo_ratio_limits()
1347 * [31:29]-- -ve delta of freq ratio of bucket 3. dump_knl_turbo_ratio_limits()
1348 * [36:32]-- +ve delta of number of active cores of bucket 4. dump_knl_turbo_ratio_limits()
1349 * [39:37]-- -ve delta of freq ratio of bucket 4. dump_knl_turbo_ratio_limits()
1350 * [44:40]-- +ve delta of number of active cores of bucket 5. dump_knl_turbo_ratio_limits()
1351 * [47:45]-- -ve delta of freq ratio of bucket 5. dump_knl_turbo_ratio_limits()
1352 * [52:48]-- +ve delta of number of active cores of bucket 6. dump_knl_turbo_ratio_limits()
1353 * [55:53]-- -ve delta of freq ratio of bucket 6. dump_knl_turbo_ratio_limits()
1354 * [60:56]-- +ve delta of number of active cores of bucket 7. dump_knl_turbo_ratio_limits()
1355 * [63:61]-- -ve delta of freq ratio of bucket 7. dump_knl_turbo_ratio_limits()
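
The turbostat comments above give the MSR_TURBO_RATIO_LIMIT layout on Knights Landing: bucket 1 carries a base core count in [7:0] and a base ratio in [15:8], and each later bucket adds a 5-bit positive core-count delta and subtracts a 3-bit ratio delta, packed at 8-bit strides starting at bit 16. The decoder below is written directly from that description; the MSR value in main() is a made-up example, not data from real silicon.

#include <stdio.h>
#include <stdint.h>

/* Decode buckets 1..7 of the turbo-ratio MSR as described above. */
static void decode_knl_turbo(uint64_t msr)
{
    unsigned int cores = msr & 0xff;          /* [7:0]  bucket 1 cores */
    unsigned int ratio = (msr >> 8) & 0xff;   /* [15:8] bucket 1 ratio */

    printf("bucket 1: %u cores, ratio %u\n", cores, ratio);

    for (int b = 2; b <= 7; b++) {
        int shift = 16 + 8 * (b - 2);
        unsigned int core_delta  = (msr >> shift) & 0x1f;       /* 5 bits */
        unsigned int ratio_delta = (msr >> (shift + 5)) & 0x7;  /* 3 bits */

        cores += core_delta;     /* +ve delta of active cores */
        ratio -= ratio_delta;    /* -ve delta of frequency ratio */
        printf("bucket %d: %u cores, ratio %u\n", b, cores, ratio);
    }
}

int main(void)
{
    /* example: 2 cores at ratio 30, then +2 cores / -1 ratio per bucket */
    uint64_t msr = 0x02 | (30ull << 8);

    for (int b = 2; b <= 7; b++)
        msr |= (2ull << (16 + 8 * (b - 2))) | (1ull << (16 + 8 * (b - 2) + 5));
    decode_knl_turbo(msr);
    return 0;
}
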
/linux-4.4.14/net/appletalk/
H A Daarp.c911 int bucket; member in struct:aarp_iter_state
923 int ct = iter->bucket; iter_next()
933 iter->bucket = ct; iter_next()
960 iter->bucket = 0; __acquires()
976 /* next entry in current bucket */ aarp_seq_next()
980 /* next bucket or table */ aarp_seq_next()
982 ++iter->bucket; aarp_seq_next()
/linux-4.4.14/net/netfilter/ipset/
H A Dip_set_hash_gen.h55 /* Currently, at listing one hash bucket must fit into a message. tune_ahash_max()
68 /* A hash bucket */
84 struct hbucket __rcu *bucket[0]; /* hashtable buckets */ member in struct:htable
87 #define hbucket(h, i) ((h)->bucket[i])
1131 /* We assume that one hash bucket fills into one page */ mtype_list()
1147 pr_debug("cb->arg bucket: %lu, t %p n %p\n", mtype_list()
1185 pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n", mtype_list()
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
H A Devents.c265 /* NB: increase sequence number in current usec bucket, ptlrpc_req_add_history()
267 * sequence and jumped into the next usec bucket (future time), ptlrpc_req_add_history()
268 * then we hope there will be fewer RPCs per bucket at some ptlrpc_req_add_history()
/linux-4.4.14/net/batman-adv/
H A Dfragmentation.c151 u8 bucket; batadv_frag_insert_packet() local
164 bucket = seqno % BATADV_FRAG_BUFFER_COUNT; batadv_frag_insert_packet()
177 chain = &orig_node->fragments[bucket]; batadv_frag_insert_packet()
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb3/
H A Dl2t.h54 * pointer. Finally, each node is a bucket of a hash table, pointing to the
/linux-4.4.14/arch/tile/include/hv/
H A Dnetio_errors.h78 * NETIO_MAX_SEND_BUFFERS to be preallocated. On a VLAN or bucket
H A Ddrv_mpipe_intf.h106 /** Number of lo bucket chunks available (16). */
110 /** Granularity of lo bucket allocation (256). */
114 /** Number of hi bucket chunks available (16). */
118 /** Granularity of hi bucket allocation (4). */
539 /** The mask for converting a flow hash into a bucket. */
542 /** The offset for converting a flow hash into a bucket. */
H A Ddrv_xgbe_intf.h80 /** Configure bucket. Write-only: takes an array of netio_bucket_t's, low
81 * 24 bits of the offset is the base bucket number times the size of a
93 * go to bucket NETIO_NUM_BUCKETS - 1. Write-only: takes an IP address
H A Diorpc.h572 /** Cannot allocate bucket. */
575 /** Invalid bucket number. */
H A Dnetio_intf.h271 * @brief A group-to-bucket identifier.
288 /** The base bucket to use to send traffic */
304 * @brief A VLAN-to-bucket identifier.
314 * A bucket-to-queue mapping.
2870 * for a packet, as defined by the group, bucket, and queue configuration,
/linux-4.4.14/fs/xfs/libxfs/
H A Dxfs_trans_resv.h55 struct xfs_trans_res tr_clearagi; /* clear agi unlinked bucket */
H A Dxfs_trans_resv.c709 * Clearing a bad agino number in an agi hash bucket.
/linux-4.4.14/drivers/crypto/nx/
H A Dnx-842-pseries.c140 int bucket = fls(time); ibm_nx842_incr_hist() local
142 if (bucket) ibm_nx842_incr_hist()
143 bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); ibm_nx842_incr_hist()
145 atomic64_inc(&times[bucket]); ibm_nx842_incr_hist()
933 /* The last bucket holds everything over nx842_timehist_show()
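
ibm_nx842_incr_hist() above converts a measured time into a histogram slot with fls(), so each bucket covers a power-of-two range and the last slot absorbs everything larger. The same trick in plain C with a portable fls() stand-in; the slot count here is arbitrary:

#include <stdio.h>

#define HIST_SLOTS 16

static unsigned long hist[HIST_SLOTS];

/* find-last-set: 1-based index of the highest set bit, 0 for value 0 */
static int fls_portable(unsigned int v)
{
    int bit = 0;

    while (v) {
        v >>= 1;
        bit++;
    }
    return bit;
}

/* Slot b (b >= 1) counts values in [2^b, 2^(b+1)); slot 0 catches 0 and 1. */
static void hist_add(unsigned int value)
{
    int bucket = fls_portable(value);

    if (bucket)
        bucket = (bucket - 1 < HIST_SLOTS - 1) ? bucket - 1 : HIST_SLOTS - 1;
    hist[bucket]++;
}

int main(void)
{
    unsigned int samples[] = { 0, 1, 3, 100, 1000000, 0xffffffff };

    for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        hist_add(samples[i]);
    for (int i = 0; i < HIST_SLOTS; i++)
        if (hist[i])
            printf("slot %2d: %lu\n", i, hist[i]);
    return 0;
}
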
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
H A Damdgpu_cs.c36 /* This is based on the bucket sort with O(n) time complexity.
37 * An item with priority "i" is added to bucket[i]. The lists are then
41 struct list_head bucket[AMDGPU_CS_NUM_BUCKETS]; member in struct:amdgpu_cs_buckets
49 INIT_LIST_HEAD(&b->bucket[i]); amdgpu_cs_buckets_init()
60 list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]); amdgpu_cs_buckets_add()
70 list_splice(&b->bucket[i], out_list); amdgpu_cs_buckets_get_list()
/linux-4.4.14/drivers/atm/
H A Dhorizon.c180 mbs is max burst size (bucket)
188 be implemented as a (real-number) leaky bucket. The GCRA can be used
226 CBR. Each TX channel has a bucket (containing up to 31 cell units)
349 . Implement VBR (bucket and timers not understood) and ABR (need to
2280 unsigned int bucket; hrz_open()
2317 // bucket calculations (from a piece of paper...) cell bucket hrz_open()
2320 bucket = mbs*(pcr-scr)/pcr; hrz_open()
2321 if (bucket*pcr != mbs*(pcr-scr)) hrz_open()
2322 bucket += 1; hrz_open()
2323 if (bucket > BUCKET_MAX_SIZE) { hrz_open()
2324 PRINTD (DBG_QOS, "shrinking bucket from %u to %u", hrz_open()
2325 bucket, BUCKET_MAX_SIZE); hrz_open()
2326 bucket = BUCKET_MAX_SIZE; hrz_open()
2329 vcc.tx_bucket_bits = bucket; hrz_open()
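
hrz_open() above sizes the CBR cell bucket from the traffic contract as bucket = mbs*(pcr-scr)/pcr, rounds the division up and clamps the result to the hardware maximum. The helper below reproduces that arithmetic; BUCKET_MAX_SIZE is assumed to be 31 from the "up to 31 cell units" remark earlier in the comment, and the rest is a sketch.

#include <stdio.h>

#define BUCKET_MAX_SIZE 31   /* "up to 31 cell units", per the comment above */

/*
 * Cell bucket depth for a CBR/VBR contract:
 *   bucket = ceil(mbs * (pcr - scr) / pcr), clamped to the hardware limit.
 * pcr: peak cell rate, scr: sustained cell rate, mbs: max burst size.
 */
static unsigned int cbr_bucket_size(unsigned int pcr, unsigned int scr,
                                    unsigned int mbs)
{
    unsigned int bucket = mbs * (pcr - scr) / pcr;

    if (bucket * pcr != mbs * (pcr - scr))
        bucket += 1;                       /* round the division up */
    if (bucket > BUCKET_MAX_SIZE)
        bucket = BUCKET_MAX_SIZE;          /* shrink to what fits */
    return bucket;
}

int main(void)
{
    printf("bucket = %u\n", cbr_bucket_size(1000, 400, 20));  /* prints 12 */
    return 0;
}
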
/linux-4.4.14/net/decnet/
H A Ddn_route.c1780 int bucket; member in struct:dn_rt_cache_iter_state
1788 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) { dn_rt_cache_get_first()
1790 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); dn_rt_cache_get_first()
1805 if (--s->bucket < 0) dn_rt_cache_get_next()
1808 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); dn_rt_cache_get_next()
H A Daf_decnet.c2117 int bucket; member in struct:dn_iter_state
2125 for(state->bucket = 0; dn_socket_get_first()
2126 state->bucket < DN_SK_HASH_SIZE; dn_socket_get_first()
2127 ++state->bucket) { dn_socket_get_first()
2128 n = sk_head(&dn_sk_hash[state->bucket]); dn_socket_get_first()
2145 if (++state->bucket >= DN_SK_HASH_SIZE) dn_socket_get_next()
2147 n = sk_head(&dn_sk_hash[state->bucket]); dn_socket_get_next()
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
H A Den_netdev.c636 struct hlist_head *bucket; mlx4_en_replace_mac() local
642 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; hlist_for_each_entry_safe()
643 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { hlist_for_each_entry_safe()
1054 struct hlist_head *bucket; mlx4_en_do_uc_filter() local
1065 bucket = &priv->mac_hash[i]; hlist_for_each_entry_safe()
1066 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { hlist_for_each_entry_safe()
1109 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; hlist_for_each_entry()
1110 hlist_for_each_entry(entry, bucket, hlist) { hlist_for_each_entry()
1150 bucket = &priv->mac_hash[mac_hash];
1151 hlist_add_head_rcu(&entry->hlist, bucket);
1269 struct hlist_head *bucket; mlx4_en_delete_rss_steer_rules() local
1274 bucket = &priv->mac_hash[i]; hlist_for_each_entry_safe()
1275 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { hlist_for_each_entry_safe()
H A Den_rx.c816 struct hlist_head *bucket; mlx4_en_process_rx_cq() local
821 bucket = &priv->mac_hash[mac_hash]; mlx4_en_process_rx_cq()
823 hlist_for_each_entry_rcu(entry, bucket, hlist) { hlist_for_each_entry_rcu()
/linux-4.4.14/fs/ocfs2/dlm/
H A Ddlmdomain.c171 struct hlist_head *bucket; __dlm_insert_lockres() local
177 bucket = dlm_lockres_hash(dlm, q->hash); __dlm_insert_lockres()
182 hlist_add_head(&res->hash_node, bucket); __dlm_insert_lockres()
193 struct hlist_head *bucket; __dlm_lookup_lockres_full() local
200 bucket = dlm_lockres_hash(dlm, hash); __dlm_lookup_lockres_full()
202 hlist_for_each_entry(res, bucket, hash_node) { hlist_for_each_entry()
425 struct hlist_head *bucket; dlm_migrate_all_locks() local
435 bucket = dlm_lockres_hash(dlm, i); dlm_migrate_all_locks()
436 iter = bucket->first; dlm_migrate_all_locks()
H A Ddlmmaster.c330 struct hlist_head *bucket; __dlm_insert_mle() local
334 bucket = dlm_master_hash(dlm, mle->mnamehash); __dlm_insert_mle()
335 hlist_add_head(&mle->master_hash_node, bucket); __dlm_insert_mle()
344 struct hlist_head *bucket; dlm_find_mle() local
350 bucket = dlm_master_hash(dlm, hash); hlist_for_each_entry()
351 hlist_for_each_entry(tmpmle, bucket, master_hash_node) { hlist_for_each_entry()
900 /* finally add the lockres to its hash bucket */ dlm_get_lock_resource()
3264 struct hlist_head *bucket; dlm_clean_master_list() local
3275 bucket = dlm_master_hash(dlm, i); hlist_for_each_entry_safe()
3276 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { hlist_for_each_entry_safe()
3464 struct hlist_head *bucket; dlm_force_free_mles() local
3481 bucket = dlm_master_hash(dlm, i); hlist_for_each_entry_safe()
3482 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { hlist_for_each_entry_safe()
H A Ddlmdebug.c444 struct hlist_head *bucket; debug_mle_print() local
453 bucket = dlm_master_hash(dlm, i); hlist_for_each_entry()
454 hlist_for_each_entry(mle, bucket, master_hash_node) { hlist_for_each_entry()
H A Ddlmrecovery.c2127 struct hlist_head *bucket; dlm_finish_local_lockres_recovery() local
2156 bucket = dlm_lockres_hash(dlm, i); hlist_for_each_entry()
2157 hlist_for_each_entry(res, bucket, hash_node) { hlist_for_each_entry()
2318 struct hlist_head *bucket; dlm_do_local_recovery_cleanup() local
2340 bucket = dlm_lockres_hash(dlm, i); hlist_for_each_entry()
2341 hlist_for_each_entry(res, bucket, hash_node) { hlist_for_each_entry()
/linux-4.4.14/drivers/staging/netlogic/
H A Dxlr_net.c254 int fr_stn_id = cpu_core * 8 + XLR_FB_STN; /* FB to 6th bucket */ xlr_make_tx_desc()
525 * Setup the Message ring credits, bucket size and other
573 int bkts[32]; /* one bucket is assumed for each cpu */ xlr_config_translate_table()
581 (use_bkt) ? "bucket" : "class"); xlr_config_translate_table()
585 /* for each cpu, mark the 4+threadid bucket */ xlr_config_translate_table()
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
H A Dvvp_dev.c261 * - file hash bucket in lu_site::ls_hash[] 28bits
263 * - how far file is from bucket head 4bits
/linux-4.4.14/fs/nfsd/
H A Dnfscache.c26 * of entries, then this should be the average number of entries per bucket.
107 * the "target" max bucket size, and round up to next power of two.
/linux-4.4.14/mm/
H A Dhighmem.c394 * Hash table bucket
398 spinlock_t lock; /* Protect this bucket's list */
/linux-4.4.14/net/dccp/
H A Dminisocks.c69 DCCP_WARN("time wait bucket table overflow\n"); dccp_time_wait()
H A Dproto.c213 /* Clean up a referenced DCCP bind bucket. */ dccp_destroy_sock()
1128 * Size and allocate the main established and bind bucket dccp_init()
/linux-4.4.14/net/ieee802154/6lowpan/
H A Drx.c86 * bucket. lowpan_rx_h_frag()
/linux-4.4.14/tools/perf/bench/
H A Dfutex-wake-parallel.c31 /* all threads will block on the same futex -- hash bucket chaos ;) */
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/
H A Dixgbe_82599.c1243 * Set the maximum length per hash bucket to 0xA filters ixgbe_init_fdir_signature_82599()
1269 * Set the maximum length per hash bucket to 0xA filters ixgbe_init_fdir_perfect_82599()
1363 /* combine common_hash result with signature and bucket hashes */ ixgbe_atr_compute_sig_hash_82599()
1496 * Limit hash to 13 bits since max bucket count is 8K. ixgbe_atr_compute_perfect_hash_82599()
1557 /* verify bucket hash is cleared on hash generation */ ixgbe_fdir_set_input_mask_82599()
1559 hw_dbg(hw, " bucket hash should always be 0 in mask\n"); ixgbe_fdir_set_input_mask_82599()
/linux-4.4.14/fs/btrfs/
H A Draid56.c350 int bucket = rbio_bucket(rbio); __remove_rbio_from_cache() local
362 h = table->table + bucket; __remove_rbio_from_cache()
364 /* hold the lock for the bucket because we may be __remove_rbio_from_cache()
660 int bucket = rbio_bucket(rbio); lock_stripe_add() local
661 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; lock_stripe_add()
749 int bucket; unlock_stripe() local
754 bucket = rbio_bucket(rbio); unlock_stripe()
755 h = rbio->fs_info->stripe_hash_table->table + bucket; unlock_stripe()
/linux-4.4.14/arch/powerpc/mm/
H A Dhash_native_64.c300 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less native_hpte_updatepp()
415 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less native_hpte_invalidate()
/linux-4.4.14/arch/s390/kernel/
H A Dkprobes.c423 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
442 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
/linux-4.4.14/net/netlabel/
H A Dnetlabel_domainhash.c110 * correct bucket number for the domain. The caller is responsible for
745 * @skip_chain: the number of entries to skip in the first iterated bucket
/linux-4.4.14/arch/x86/pci/
H A Dintel_mid_pci.c17 * actually exists, otherwise return all 1s for reads and bit bucket
/linux-4.4.14/drivers/staging/lustre/lustre/include/
H A Dlu_object.h557 * number of object in this bucket on the lsb_lru list.
562 * bucket lock of lu_site::ls_obj_hash.
607 * index of bucket on hash table while purging
H A Dlustre_dlm.h371 /** big refcount (by bucket) */
628 * Protected by per-bucket exp->exp_lock_hash locks.
633 * Protected by per-bucket exp->exp_flock_hash locks.
/linux-4.4.14/drivers/media/v4l2-core/
H A Dv4l2-ctrls.c1781 int bucket; find_ref() local
1788 bucket = id % hdl->nr_of_buckets; find_ref()
1795 ref = hdl->buckets ? hdl->buckets[bucket] : NULL; find_ref()
1835 int bucket = id % hdl->nr_of_buckets; /* which bucket to use */ handler_new_ref() local
1890 new_ref->next = hdl->buckets[bucket]; handler_new_ref()
1891 hdl->buckets[bucket] = new_ref; handler_new_ref()
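The v4l2-ctrls.c hits show a simple chained table: the bucket index is just id % nr_of_buckets, each ref carries its own next pointer, and handler_new_ref() pushes new entries onto the front of the chain. A plain-C sketch of that structure follows; type and field names are illustrative, and the handler lock taken by the real code is omitted.

    struct demo_ref {
            unsigned int id;
            struct demo_ref *next;
    };

    struct demo_handler {
            struct demo_ref **buckets;      /* nr_of_buckets chain heads */
            unsigned int nr_of_buckets;
    };

    static struct demo_ref *demo_find_ref(struct demo_handler *hdl, unsigned int id)
    {
            struct demo_ref *ref = hdl->buckets[id % hdl->nr_of_buckets];

            while (ref && ref->id != id)
                    ref = ref->next;
            return ref;     /* null pointer when not found */
    }

    static void demo_new_ref(struct demo_handler *hdl, struct demo_ref *ref)
    {
            unsigned int bucket = ref->id % hdl->nr_of_buckets;

            /* prepend, so the most recently added ref is found first */
            ref->next = hdl->buckets[bucket];
            hdl->buckets[bucket] = ref;
    }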
/linux-4.4.14/drivers/net/ethernet/sfc/
H A Dmcdi_pcol.h7255 * Allocate a pacer bucket (for qau rp or a snapper test)
7266 /* the bucket id */
7272 * Free a pacer bucket
7280 /* the bucket id */
7289 * Initialise pacer bucket with a given rate
7297 /* the bucket id */
7304 /* the bucket id */
7337 /* the reaction point (RP) bucket */
7339 /* an already reserved bucket (typically set to bucket associated with outer
7343 /* an already reserved bucket (typically set to bucket associated with inner
7347 /* the min bucket (typically for ETS/minimum bandwidth) */
7364 /* the reaction point (RP) bucket */
7366 /* an already reserved bucket (typically set to bucket associated with outer
7370 /* an already reserved bucket (typically set to bucket associated with inner
7374 /* the min bucket (typically for ETS/minimum bandwidth) */
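The MCDI definitions above describe firmware-managed pacer buckets that are allocated and then initialised with a rate. Purely as a conceptual illustration of what such a rate bucket does (this is not the sfc/MCDI interface), a classic token bucket with a byte rate and a burst cap can be sketched as:

    #include <stdint.h>
    #include <stdbool.h>

    struct demo_pacer_bucket {
            uint64_t rate_bps;      /* refill rate, bytes per second */
            uint64_t burst;         /* bucket depth, bytes */
            uint64_t tokens;        /* current fill level, bytes */
            uint64_t last_ns;       /* timestamp of the last refill */
    };

    static void demo_bucket_init(struct demo_pacer_bucket *b, uint64_t rate_bps,
                                 uint64_t burst, uint64_t now_ns)
    {
            b->rate_bps = rate_bps;
            b->burst = burst;
            b->tokens = burst;      /* start full */
            b->last_ns = now_ns;
    }

    /* returns true if a packet of len bytes may be sent at now_ns */
    static bool demo_bucket_consume(struct demo_pacer_bucket *b, uint64_t len,
                                    uint64_t now_ns)
    {
            /* refill for the elapsed time (overflow ignored for brevity) */
            uint64_t refill = (now_ns - b->last_ns) * b->rate_bps / 1000000000ull;

            b->tokens += refill;
            if (b->tokens > b->burst)
                    b->tokens = b->burst;
            b->last_ns = now_ns;

            if (b->tokens < len)
                    return false;
            b->tokens -= len;
            return true;
    }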
/linux-4.4.14/arch/x86/kernel/
H A Dapm_32.c913 unsigned int bucket; apm_cpu_idle() local
933 bucket = IDLE_LEAKY_MAX; apm_cpu_idle()
944 if (bucket) { apm_cpu_idle()
945 bucket = IDLE_LEAKY_MAX; apm_cpu_idle()
948 } else if (bucket) { apm_cpu_idle()
949 bucket--; apm_cpu_idle()
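The apm_32.c hits show the skeleton of a leaky-bucket fallback heuristic in apm_cpu_idle(): a pass that makes progress refills the bucket, a pass that does not leaks one token, and an empty bucket makes the caller give up on that path. A sketch of the same refill/leak pattern follows; the limit and the meaning of "progress" are illustrative assumptions, not lifted from apm_32.c.

    #include <stdbool.h>

    #define DEMO_LEAKY_MAX 16

    /* returns true while the expensive path is still worth retrying */
    static bool demo_leaky_bucket(bool made_progress)
    {
            static unsigned int bucket = DEMO_LEAKY_MAX;

            if (made_progress) {
                    bucket = DEMO_LEAKY_MAX;        /* progress: refill completely */
                    return true;
            }
            if (bucket) {
                    bucket--;                       /* no progress: leak one token */
                    return true;
            }
            return false;                           /* bucket empty: give up */
    }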
/linux-4.4.14/net/unix/
H A Daf_unix.c2735 unsigned long bucket = get_bucket(*pos); unix_from_bucket() local
2739 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) { unix_from_bucket()
2753 unsigned long bucket; unix_next_socket() local
2769 bucket = get_bucket(*pos) + 1; unix_next_socket()
2770 *pos = set_bucket_offset(bucket, 1); unix_next_socket()
2771 } while (bucket < ARRAY_SIZE(unix_socket_table)); unix_next_socket()
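The af_unix.c iterator above folds both a bucket index and an in-bucket offset into the seq_file position, so a later read() can resume at the right chain and entry (get_bucket(), set_bucket_offset()). A sketch of that kind of encoding follows; the bit split chosen below is an arbitrary illustration, not the af_unix one.

    #include <stdint.h>

    #define DEMO_OFFSET_BITS 48

    static inline unsigned long demo_get_bucket(uint64_t pos)
    {
            return (unsigned long)(pos >> DEMO_OFFSET_BITS);
    }

    static inline uint64_t demo_get_offset(uint64_t pos)
    {
            return pos & ((1ull << DEMO_OFFSET_BITS) - 1);
    }

    static inline uint64_t demo_set_bucket_offset(unsigned long bucket,
                                                  uint64_t offset)
    {
            return ((uint64_t)bucket << DEMO_OFFSET_BITS) | offset;
    }

    /*
     * An iterator restarts from demo_get_bucket(*pos), skips
     * demo_get_offset(*pos) entries in that chain, and moves on by
     * bumping the bucket part and resetting the offset part of *pos.
     */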
/linux-4.4.14/net/sunrpc/
H A Dcache.c419 /* find a non-empty bucket in the table */ cache_clean()
425 /* find a cleanable entry in the bucket and clean it, or set to next bucket */ cache_clean()
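The cache.c comments describe an incremental sweep: remember which bucket was looked at last, do a bounded amount of cleaning per call, and move to the next bucket when the current one has nothing cleanable. A self-contained sketch of such a sweeper follows; the table layout and expiry rule are illustrative.

    #include <time.h>

    #define DEMO_NBUCKETS 64

    struct demo_centry {
            struct demo_centry *next;
            time_t expiry;
    };

    static struct demo_centry *demo_cache[DEMO_NBUCKETS];
    static unsigned int demo_cursor;        /* bucket to examine next */

    /* returns 1 if an entry was unlinked, 0 if a full pass found none */
    static int demo_cache_clean(time_t now)
    {
            unsigned int scanned;

            for (scanned = 0; scanned < DEMO_NBUCKETS; scanned++) {
                    struct demo_centry **pp = &demo_cache[demo_cursor];

                    while (*pp) {
                            if ((*pp)->expiry <= now) {
                                    *pp = (*pp)->next;  /* unlink; a real cache would free it */
                                    return 1;           /* at most one entry per call */
                            }
                            pp = &(*pp)->next;
                    }
                    demo_cursor = (demo_cursor + 1) % DEMO_NBUCKETS;
            }
            return 0;
    }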
/linux-4.4.14/drivers/pci/hotplug/
H A Dibmphp.h369 * from the Memory bucket rather than from PFMem */
/linux-4.4.14/arch/tile/kernel/
H A Dkprobes.c472 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
/linux-4.4.14/arch/arc/kernel/
H A Dkprobes.c464 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
/linux-4.4.14/arch/powerpc/kernel/
H A Dkprobes.c318 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
/linux-4.4.14/arch/sh/kernel/
H A Dkprobes.c335 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
/linux-4.4.14/drivers/message/fusion/
H A Dmptlan.c519 /**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x " mpt_lan_close()
954 // dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x " mpt_lan_receive_post_free()
/linux-4.4.14/drivers/block/
H A Dcciss.c213 static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
3904 * scatter gather elements supported) and bucket[],
3905 * which is an array of 8 integers. The bucket[] array
3915 static void calc_bucket_map(int bucket[], int num_buckets, calc_bucket_map() argument
3927 b = num_buckets; /* Assume the biggest bucket */ calc_bucket_map()
3928 /* Find the bucket that is just big enough */ calc_bucket_map()
3930 if (bucket[j] >= size) { calc_bucket_map()
3935 /* for a command with i SG entries, use bucket b. */ calc_bucket_map()
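The cciss.c comments spell out what calc_bucket_map() builds: given a handful of preallocated buffer sizes ("buckets"), precompute, for every possible scatter-gather count, the index of the smallest bucket that still fits the command. The hpsa hits further down carry the same helper. A self-contained sketch of that mapping follows; the bucket sizes and the fixed per-command overhead are illustrative values.

    #include <stdio.h>

    #define DEMO_MIN_BLOCKS 4       /* even a command with 0 SG entries needs room */

    static void demo_calc_bucket_map(const int bucket[], int num_buckets,
                                     int nsgs, int bucket_map[])
    {
            int i, j;

            /* bucket_map[] must have nsgs + 1 entries */
            for (i = 0; i <= nsgs; i++) {
                    int size = i + DEMO_MIN_BLOCKS; /* size of a command with i SG entries */
                    int b = num_buckets - 1;        /* fall back to the biggest bucket */

                    /* find the first (i.e. smallest) bucket that is big enough */
                    for (j = 0; j < num_buckets; j++) {
                            if (bucket[j] >= size) {
                                    b = j;
                                    break;
                            }
                    }
                    bucket_map[i] = b;      /* commands with i SG entries use bucket b */
            }
    }

    int main(void)
    {
            const int bucket[8] = { 5, 6, 8, 10, 12, 20, 28, 36 };
            int map[33];
            int i;

            demo_calc_bucket_map(bucket, 8, 32, map);
            for (i = 0; i <= 32; i++)
                    printf("%2d SG entries -> bucket %d (size %d)\n",
                           i, map[i], bucket[map[i]]);
            return 0;
    }

Precomputing this map once at init time turns the per-command bucket choice into a single array lookup, which is the point of building it up front in both drivers.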
/linux-4.4.14/arch/x86/kernel/kprobes/
H A Dcore.c747 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
766 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
/linux-4.4.14/net/ceph/
H A Dosdmap.c207 dout("crush_decode bucket %d off %x %p to %p\n", crush_decode()
243 dout("crush_decode bucket size %d off %x %p to %p\n", crush_decode()
/linux-4.4.14/arch/ia64/kernel/
H A Dkprobes.c449 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
466 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
/linux-4.4.14/fs/gfs2/
H A Dglock.c1410 * examine_bucket - Call a function for glock in a hash bucket
1413 * @bucket: the bucket
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb/
H A Dvsc7326_reg.h83 * bn = bucket number 0-10 (yes, 11 buckets)
/linux-4.4.14/arch/powerpc/platforms/pseries/
H A Dlpar.c183 * bucket bit here as well pSeries_lpar_hpte_insert()
/linux-4.4.14/arch/mips/kernel/
H A Dkprobes.c626 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
/linux-4.4.14/arch/arm/probes/kprobes/
H A Dcore.c455 /* another task is sharing our hash bucket */ hlist_for_each_entry_safe()
/linux-4.4.14/drivers/scsi/
H A Dhpsa.c267 static void calc_bucket_map(int *bucket, int num_buckets,
8791 * scatter gather elements supported) and bucket[],
8792 * which is an array of 8 integers. The bucket[] array
8802 static void calc_bucket_map(int bucket[], int num_buckets, calc_bucket_map() argument
8811 b = num_buckets; /* Assume the biggest bucket */ calc_bucket_map()
8812 /* Find the bucket that is just big enough */ calc_bucket_map()
8814 if (bucket[j] >= size) { calc_bucket_map()
8819 /* for a command with i SG entries, use bucket b. */ calc_bucket_map()
/linux-4.4.14/kernel/time/
H A Dtimer.c1286 /* Look at the cascade bucket(s)? */ __next_timer_interrupt()
1324 /* Look at the cascade bucket(s)? */ __next_timer_interrupt()
/linux-4.4.14/drivers/scsi/megaraid/
H A Dmegaraid_sas.h816 * Define ECC single-bit-error bucket information
/linux-4.4.14/drivers/staging/lustre/lustre/ldlm/
H A Dldlm_resource.c558 /** hash bucket bits */
/linux-4.4.14/drivers/scsi/aic7xxx/
H A Daic79xx_pci.c772 "%s: Split completion data bucket in %s\n",
/linux-4.4.14/drivers/irqchip/
H A Dirq-gic.c1196 * at its normal offset. Please pass me that bucket. gic_check_eoimode()
/linux-4.4.14/drivers/block/aoe/
H A Daoecmd.c798 * Hang all frames on first hash bucket for downdev
/linux-4.4.14/fs/lockd/
H A Dsvclock.c702 * with the pid in order to create a key value for picking a hash bucket.
/linux-4.4.14/fs/nfs/filelayout/
H A Dfilelayout.c1047 /* Linearly search the commit lists for each bucket until a matching filelayout_search_commit_reqs()
/linux-4.4.14/fs/afs/
H A Dinternal.h242 uint8_t hash_bucket; /* which hash bucket this represents */

