
Searched refs:bucket (Results 1 – 118 of 118) sorted by relevance

/linux-4.4.14/net/ceph/crush/
Dmapper.c73 static int bucket_perm_choose(struct crush_bucket *bucket, in bucket_perm_choose() argument
76 unsigned int pr = r % bucket->size; in bucket_perm_choose()
80 if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) { in bucket_perm_choose()
81 dprintk("bucket %d new x=%d\n", bucket->id, x); in bucket_perm_choose()
82 bucket->perm_x = x; in bucket_perm_choose()
86 s = crush_hash32_3(bucket->hash, x, bucket->id, 0) % in bucket_perm_choose()
87 bucket->size; in bucket_perm_choose()
88 bucket->perm[0] = s; in bucket_perm_choose()
89 bucket->perm_n = 0xffff; /* magic value, see below */ in bucket_perm_choose()
93 for (i = 0; i < bucket->size; i++) in bucket_perm_choose()
[all …]
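
The CRUSH code above seeds a per-bucket permutation from a hash of the input x and the bucket id, so replica placement is deterministic for a given x yet spreads evenly across the bucket's slots. Below is a user-space sketch of just the seeding step that fills perm[0]; hash3() is a stand-in for the kernel's crush_hash32_3(), and the struct is reduced to the two fields the math needs:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for crush_hash32_3(): any decent 3-input integer mix. */
    static uint32_t hash3(uint32_t a, uint32_t b, uint32_t c)
    {
        uint32_t h = a * 2654435761u ^ b * 2246822519u ^ c * 3266489917u;

        h ^= h >> 16;
        return h * 2654435761u;
    }

    struct bucket {
        uint32_t id;    /* the kernel uses a signed id; simplified here */
        uint32_t size;  /* number of items in the bucket */
    };

    /* Pick a deterministic starting slot for input x, like the
     * bucket->perm[0] seeding in bucket_perm_choose(). */
    static uint32_t perm_choose_first(const struct bucket *b, uint32_t x)
    {
        return hash3(x, b->id, 0) % b->size;
    }

    int main(void)
    {
        struct bucket b = { .id = 7, .size = 5 };

        for (uint32_t x = 0; x < 4; x++)
            printf("x=%u -> slot %u\n", x, perm_choose_first(&b, x));
        return 0;
    }
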
/linux-4.4.14/drivers/staging/lustre/lustre/obdclass/
Dlustre_handles.c66 struct handle_bucket *bucket; in class_handle_hash() local
93 bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK]; in class_handle_hash()
94 spin_lock(&bucket->lock); in class_handle_hash()
95 list_add_rcu(&h->h_link, &bucket->head); in class_handle_hash()
97 spin_unlock(&bucket->lock); in class_handle_hash()
127 struct handle_bucket *bucket; in class_handle_unhash() local
129 bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK); in class_handle_unhash()
131 spin_lock(&bucket->lock); in class_handle_unhash()
133 spin_unlock(&bucket->lock); in class_handle_unhash()
139 struct handle_bucket *bucket; in class_handle2object() local
[all …]
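
lustre_handles.c masks the handle cookie into a fixed-size table and takes only that bucket's lock to link the entry, so inserts into different buckets never contend. A compilable sketch of the same per-bucket-lock pattern, with pthread mutexes standing in for spin_lock()/list_add_rcu() and an illustrative table size:

    #include <pthread.h>
    #include <stdint.h>

    #define HANDLE_HASH_SIZE 64              /* illustrative; a power of two */
    #define HANDLE_HASH_MASK (HANDLE_HASH_SIZE - 1)

    struct handle {
        uint64_t cookie;
        struct handle *next;                 /* chain within one bucket */
    };

    static struct handle_bucket {
        pthread_mutex_t lock;
        struct handle *head;
    } handle_hash[HANDLE_HASH_SIZE];

    static void handle_hash_init(void)
    {
        for (int i = 0; i < HANDLE_HASH_SIZE; i++)
            pthread_mutex_init(&handle_hash[i].lock, NULL);
    }

    /* Insert under the one bucket lock; other buckets stay untouched. */
    static void handle_hash_add(struct handle *h)
    {
        struct handle_bucket *b = &handle_hash[h->cookie & HANDLE_HASH_MASK];

        pthread_mutex_lock(&b->lock);
        h->next = b->head;
        b->head = h;
        pthread_mutex_unlock(&b->lock);
    }
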
/linux-4.4.14/net/sched/
Dsch_hhf.c328 static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket) in dequeue_head() argument
330 struct sk_buff *skb = bucket->head; in dequeue_head()
332 bucket->head = skb->next; in dequeue_head()
338 static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb) in bucket_add() argument
340 if (bucket->head == NULL) in bucket_add()
341 bucket->head = skb; in bucket_add()
343 bucket->tail->next = skb; in bucket_add()
344 bucket->tail = skb; in bucket_add()
351 struct wdrr_bucket *bucket; in hhf_drop() local
354 bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; in hhf_drop()
[all …]
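
Each WDRR bucket in sch_hhf.c is a bare singly linked queue: bucket_add() appends at the tail in O(1) and dequeue_head() pops the front. The same queue in self-contained form, with sk_buff reduced to its next pointer:

    #include <stddef.h>

    struct sk_buff {                /* stand-in for the kernel's skb */
        struct sk_buff *next;
    };

    struct wdrr_bucket {
        struct sk_buff *head, *tail;
    };

    /* Append at the tail; an empty bucket has head == NULL. */
    static void bucket_add(struct wdrr_bucket *b, struct sk_buff *skb)
    {
        skb->next = NULL;
        if (b->head == NULL)
            b->head = skb;
        else
            b->tail->next = skb;
        b->tail = skb;
    }

    /* Pop from the head; returns NULL when the bucket is empty. */
    static struct sk_buff *dequeue_head(struct wdrr_bucket *b)
    {
        struct sk_buff *skb = b->head;

        if (skb)
            b->head = skb->next;
        return skb;
    }
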
/linux-4.4.14/net/9p/
Derror.c196 int bucket; in p9_error_init() local
199 for (bucket = 0; bucket < ERRHASHSZ; bucket++) in p9_error_init()
200 INIT_HLIST_HEAD(&hash_errmap[bucket]); in p9_error_init()
205 bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ; in p9_error_init()
207 hlist_add_head(&c->list, &hash_errmap[bucket]); in p9_error_init()
225 int bucket; in p9_errstr2errno() local
229 bucket = jhash(errstr, len, 0) % ERRHASHSZ; in p9_errstr2errno()
230 hlist_for_each_entry(c, &hash_errmap[bucket], list) { in p9_errstr2errno()
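
p9_error_init() hashes every error string with jhash and chains it into one of ERRHASHSZ buckets; p9_errstr2errno() recomputes the same hash so only one chain needs scanning. A minimal sketch with djb2 as a stand-in for the kernel's jhash():

    #include <string.h>

    #define ERRHASHSZ 32

    struct errmap {
        const char *name;
        int errno_val;
        struct errmap *next;
    };

    static struct errmap *hash_errmap[ERRHASHSZ];

    static unsigned int errhash(const char *s, size_t len)
    {
        unsigned int h = 5381;      /* djb2; the kernel uses jhash() */

        while (len--)
            h = h * 33 + (unsigned char)*s++;
        return h % ERRHASHSZ;
    }

    static void errmap_add(struct errmap *c)
    {
        unsigned int bucket = errhash(c->name, strlen(c->name));

        c->next = hash_errmap[bucket];
        hash_errmap[bucket] = c;
    }

    static int errstr2errno(const char *s)
    {
        for (struct errmap *c = hash_errmap[errhash(s, strlen(s))];
             c; c = c->next)
            if (!strcmp(c->name, s))
                return c->errno_val;
        return 0;                   /* not found */
    }
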
/linux-4.4.14/drivers/cpuidle/governors/
Dmenu.c127 unsigned int bucket; member
144 int bucket = 0; in which_bucket() local
153 bucket = BUCKETS/2; in which_bucket()
156 return bucket; in which_bucket()
158 return bucket + 1; in which_bucket()
160 return bucket + 2; in which_bucket()
162 return bucket + 3; in which_bucket()
164 return bucket + 4; in which_bucket()
165 return bucket + 5; in which_bucket()
307 data->bucket = which_bucket(data->next_timer_us, nr_iowaiters); in menu_select()
[all …]
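
which_bucket() classifies the predicted sleep length into one of six duration classes, shifted into the upper half of the table when I/O is outstanding, so menu.c can keep a separate correction factor per class. A reconstruction of the threshold ladder; the decade cutoffs in microseconds match the shape of the excerpt but should be read as illustrative:

    #define BUCKETS 12

    static int which_bucket(unsigned int duration_us, unsigned long nr_iowaiters)
    {
        int bucket = 0;

        if (nr_iowaiters)               /* iowait selects the upper half */
            bucket = BUCKETS / 2;

        if (duration_us < 10)
            return bucket;
        if (duration_us < 100)
            return bucket + 1;
        if (duration_us < 1000)
            return bucket + 2;
        if (duration_us < 10000)
            return bucket + 3;
        if (duration_us < 100000)
            return bucket + 4;
        return bucket + 5;
    }
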
/linux-4.4.14/fs/dlm/
Ddebug_fs.c371 unsigned bucket; member
430 unsigned bucket, entry; in table_seq_start() local
433 bucket = n >> 32; in table_seq_start()
436 if (bucket >= ls->ls_rsbtbl_size) in table_seq_start()
453 tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; in table_seq_start()
455 spin_lock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start()
462 ri->bucket = bucket; in table_seq_start()
463 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start()
468 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in table_seq_start()
478 bucket++; in table_seq_start()
[all …]
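
table_seq_start() packs the hash bucket into the high 32 bits of the seq_file position and the entry index into the low 32, so a later read can resume at the right bucket. The encoding in isolation (helper names are made up):

    #include <stdint.h>

    /* Pack a (bucket, entry) cursor into one 64-bit seq_file position,
     * mirroring "bucket = n >> 32" in table_seq_start(). */
    static inline uint64_t pos_pack(uint32_t bucket, uint32_t entry)
    {
        return ((uint64_t)bucket << 32) | entry;
    }

    static inline uint32_t pos_bucket(uint64_t n) { return (uint32_t)(n >> 32); }
    static inline uint32_t pos_entry(uint64_t n)  { return (uint32_t)n; }
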
Ddir.c203 uint32_t hash, bucket; in find_rsb_root() local
207 bucket = hash & (ls->ls_rsbtbl_size - 1); in find_rsb_root()
209 spin_lock(&ls->ls_rsbtbl[bucket].lock); in find_rsb_root()
210 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); in find_rsb_root()
212 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, in find_rsb_root()
214 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in find_rsb_root()
Dlock.c352 uint32_t bucket = r->res_bucket; in put_rsb() local
354 spin_lock(&ls->ls_rsbtbl[bucket].lock); in put_rsb()
356 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in put_rsb()
5470 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) in find_grant_rsb() argument
5475 spin_lock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5476 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { in find_grant_rsb()
5486 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5489 spin_unlock(&ls->ls_rsbtbl[bucket].lock); in find_grant_rsb()
5513 int bucket = 0; in dlm_recover_grant() local
5519 r = find_grant_rsb(ls, bucket); in dlm_recover_grant()
[all …]
/linux-4.4.14/fs/nfs/
Dpnfs_nfs.c76 struct pnfs_commit_bucket *bucket; in pnfs_generic_clear_request_commit() local
78 bucket = list_first_entry(&req->wb_list, in pnfs_generic_clear_request_commit()
81 freeme = bucket->wlseg; in pnfs_generic_clear_request_commit()
82 bucket->wlseg = NULL; in pnfs_generic_clear_request_commit()
114 pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, in pnfs_generic_scan_ds_commit_list() argument
118 struct list_head *src = &bucket->written; in pnfs_generic_scan_ds_commit_list()
119 struct list_head *dst = &bucket->committing; in pnfs_generic_scan_ds_commit_list()
127 if (bucket->clseg == NULL) in pnfs_generic_scan_ds_commit_list()
128 bucket->clseg = pnfs_get_lseg(bucket->wlseg); in pnfs_generic_scan_ds_commit_list()
130 pnfs_put_lseg_locked(bucket->wlseg); in pnfs_generic_scan_ds_commit_list()
[all …]
/linux-4.4.14/lib/
Ddma-debug.c268 static void put_hash_bucket(struct hash_bucket *bucket, in put_hash_bucket() argument
273 spin_unlock_irqrestore(&bucket->lock, __flags); in put_hash_bucket()
298 static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, in __hash_bucket_find() argument
305 list_for_each_entry(entry, &bucket->list, list) { in __hash_bucket_find()
348 static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, in bucket_find_exact() argument
351 return __hash_bucket_find(bucket, ref, exact_match); in bucket_find_exact()
354 static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, in bucket_find_contain() argument
364 entry = __hash_bucket_find(*bucket, ref, containing_match); in bucket_find_contain()
372 put_hash_bucket(*bucket, flags); in bucket_find_contain()
375 *bucket = get_hash_bucket(&index, flags); in bucket_find_contain()
[all …]
/linux-4.4.14/arch/mips/netlogic/xlr/
Dfmn.c72 int bucket, rv; in fmn_message_handler() local
86 for (bucket = 0; bucket < 8; bucket++) { in fmn_message_handler()
88 if (bkt_status & (1 << bucket)) in fmn_message_handler()
90 rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid, in fmn_message_handler()
101 hndlr->action(bucket, src_stnid, size, code, in fmn_message_handler()
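
fmn_message_handler() reads an 8-bit bucket status word and walks buckets 0 through 7; a set bit is taken here to mean "bucket empty" (the XLR convention assumed from the excerpt), so only the remaining buckets are drained. The scan pattern on its own, with a hypothetical drain() callback:

    #include <stdint.h>

    /* Walk an 8-bucket status word; a set bit marks an empty bucket
     * (XLR convention assumed), everything else is drained. */
    static void scan_buckets(uint32_t bkt_status, void (*drain)(int bucket))
    {
        for (int bucket = 0; bucket < 8; bucket++) {
            if (bkt_status & (1u << bucket))
                continue;           /* nothing queued in this bucket */
            drain(bucket);
        }
    }
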
/linux-4.4.14/net/ipv4/netfilter/
Dnf_conntrack_l3proto_ipv4_compat.c29 unsigned int bucket; member
38 for (st->bucket = 0; in ct_get_first()
39 st->bucket < net->ct.htable_size; in ct_get_first()
40 st->bucket++) { in ct_get_first()
42 hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); in ct_get_first()
57 if (likely(get_nulls_value(head) == st->bucket)) { in ct_get_next()
58 if (++st->bucket >= net->ct.htable_size) in ct_get_next()
62 hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); in ct_get_next()
218 unsigned int bucket; member
227 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { in ct_expect_get_first()
[all …]
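
The conntrack /proc iterators all share one shape: ct_get_first() scans buckets from st->bucket upward until a non-empty chain turns up, and ct_get_next() follows the chain, bumping st->bucket and falling back to the next chain head when the current one ends. A user-space rendering of that two-level cursor over simplified types:

    #include <stddef.h>

    #define HTABLE_SIZE 16

    struct node { struct node *next; };

    struct iter_state { unsigned int bucket; };

    static struct node *table[HTABLE_SIZE];

    /* Find the first entry at or after st->bucket. */
    static struct node *ct_get_first(struct iter_state *st)
    {
        for (; st->bucket < HTABLE_SIZE; st->bucket++)
            if (table[st->bucket])
                return table[st->bucket];
        return NULL;
    }

    /* Advance within the chain, then move on to later buckets. */
    static struct node *ct_get_next(struct iter_state *st, struct node *n)
    {
        if (n->next)
            return n->next;
        st->bucket++;
        return ct_get_first(st);
    }
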
/linux-4.4.14/fs/ocfs2/
Dxattr.c135 struct ocfs2_xattr_bucket *bucket; member
289 struct ocfs2_xattr_bucket *bucket,
311 struct ocfs2_xattr_bucket *bucket,
332 struct ocfs2_xattr_bucket *bucket; in ocfs2_xattr_bucket_new() local
337 bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS); in ocfs2_xattr_bucket_new()
338 if (bucket) { in ocfs2_xattr_bucket_new()
339 bucket->bu_inode = inode; in ocfs2_xattr_bucket_new()
340 bucket->bu_blocks = blks; in ocfs2_xattr_bucket_new()
343 return bucket; in ocfs2_xattr_bucket_new()
346 static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket) in ocfs2_xattr_bucket_relse() argument
[all …]
Docfs2_trace.h1791 unsigned int hash, unsigned long long bucket, \
1793 TP_ARGS(ino, name, name_index, hash, bucket, xe_index))
/linux-4.4.14/drivers/md/bcache/
Dalloc.c74 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) in bch_inc_gen()
87 struct bucket *b; in bch_rescale_priorities()
124 static inline bool can_inc_bucket_gen(struct bucket *b) in can_inc_bucket_gen()
129 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) in bch_can_invalidate_bucket()
139 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in __bch_invalidate_one_bucket()
152 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in bch_invalidate_one_bucket()
180 struct bucket *b; in invalidate_buckets_lru()
217 struct bucket *b; in invalidate_buckets_fifo()
240 struct bucket *b; in invalidate_buckets_random()
298 static int bch_allocator_push(struct cache *ca, long bucket) in bch_allocator_push() argument
[all …]
Dbcache.h194 struct bucket { struct
207 BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
213 BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
214 BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
419 struct bucket *buckets;
421 DECLARE_HEAP(struct bucket *, heap);
729 static inline struct bucket *PTR_BUCKET(struct cache_set *c, in PTR_BUCKET()
827 static inline uint8_t bucket_gc_gen(struct bucket *b) in bucket_gc_gen()
862 uint8_t bch_inc_gen(struct cache *, struct bucket *);
865 bool bch_can_invalidate_bucket(struct cache *, struct bucket *);
[all …]
Dextents.c53 size_t bucket = PTR_BUCKET_NR(c, k, i); in __ptr_invalid() local
57 bucket < ca->sb.first_bucket || in __ptr_invalid()
58 bucket >= ca->sb.nbuckets) in __ptr_invalid()
74 size_t bucket = PTR_BUCKET_NR(c, k, i); in bch_ptr_status() local
79 if (bucket < ca->sb.first_bucket) in bch_ptr_status()
81 if (bucket >= ca->sb.nbuckets) in bch_ptr_status()
175 struct bucket *g; in btree_ptr_bad_expensive()
506 struct bucket *g = PTR_BUCKET(b->c, k, ptr); in bch_extent_bad_expensive()
536 struct bucket *g; in bch_extent_bad()
Dmovinggc.c188 static bool bucket_cmp(struct bucket *l, struct bucket *r) in bucket_cmp()
195 struct bucket *b; in bucket_heap_top()
202 struct bucket *b; in bch_moving_gc()
Dsuper.c501 static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) in prio_io() argument
508 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; in prio_io()
524 struct bucket *b; in bch_prio_write()
540 long bucket; in bch_prio_write() local
556 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true); in bch_prio_write()
557 BUG_ON(bucket == -1); in bch_prio_write()
560 prio_io(ca, bucket, REQ_WRITE); in bch_prio_write()
563 ca->prio_buckets[i] = bucket; in bch_prio_write()
564 atomic_dec_bug(&ca->buckets[bucket].pin); in bch_prio_write()
587 static void prio_read(struct cache *ca, uint64_t bucket) in prio_read() argument
[all …]
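
In bcache a bucket is the allocation unit on the cache device, so turning a bucket number into an I/O position is a single multiply: prio_io() sets bi_sector = bucket * ca->sb.bucket_size, with bucket_size kept in 512-byte sectors in the superblock. As a hedged helper:

    #include <stdint.h>

    /* bucket_size is in 512-byte sectors, as in struct cache_sb. */
    static inline uint64_t bucket_to_sector(uint64_t bucket,
                                            uint16_t bucket_size)
    {
        return bucket * bucket_size;
    }
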
Dbtree.c1176 struct bucket *g; in __bch_btree_mark_key()
1232 struct bucket *b = PTR_BUCKET(c, k, i); in bch_initial_mark_key()
1633 struct bucket *b; in btree_gc_start()
1659 struct bucket *b; in bch_btree_gc_finish()
1851 struct bucket *b; in bch_initial_gc_finish()
Djournal.c44 sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]); in journal_read_bucket() local
55 bio->bi_iter.bi_sector = bucket + offset; in journal_read_bucket()
Dsysfs.c761 struct bucket *b; in SHOW()
/linux-4.4.14/include/trace/events/
Dbcache.h67 __field(size_t, bucket )
71 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
74 TP_printk("bucket %zu", __entry->bucket)
245 __field(size_t, bucket )
251 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
256 TP_printk("bucket %zu", __entry->bucket)
347 __field(size_t, bucket )
352 __entry->bucket = PTR_BUCKET_NR(b->c, &b->key, 0);
356 TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
406 TP_PROTO(struct cache *ca, size_t bucket),
[all …]
/linux-4.4.14/drivers/md/persistent-data/
Ddm-transaction-manager.c106 unsigned bucket = dm_hash_block(b, DM_HASH_MASK); in is_shadow() local
110 hlist_for_each_entry(si, tm->buckets + bucket, hlist) in is_shadow()
126 unsigned bucket; in insert_shadow() local
132 bucket = dm_hash_block(b, DM_HASH_MASK); in insert_shadow()
134 hlist_add_head(&si->hlist, tm->buckets + bucket); in insert_shadow()
143 struct hlist_head *bucket; in wipe_shadow_table() local
148 bucket = tm->buckets + i; in wipe_shadow_table()
149 hlist_for_each_entry_safe(si, tmp, bucket, hlist) in wipe_shadow_table()
152 INIT_HLIST_HEAD(bucket); in wipe_shadow_table()
/linux-4.4.14/arch/sparc/kernel/
Dirq_64.c206 struct ino_bucket bucket; member
257 struct ino_bucket *bucket; in cookie_exists() local
268 bucket = (struct ino_bucket *) __va(cookie); in cookie_exists()
269 irq = bucket->__irq; in cookie_exists()
278 struct ino_bucket *bucket; in sysino_exists() local
281 bucket = &ivector_table[sysino]; in sysino_exists()
282 irq = bucket_get_irq(__pa(bucket)); in sysino_exists()
615 struct ino_bucket *bucket; in build_irq() local
622 bucket = &ivector_table[ino]; in build_irq()
623 irq = bucket_get_irq(__pa(bucket)); in build_irq()
[all …]
/linux-4.4.14/fs/
Dmbcache.c583 unsigned int bucket; in mb_cache_entry_insert() local
590 bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), in mb_cache_entry_insert()
592 block_hash_p = &cache->c_block_hash[bucket]; in mb_cache_entry_insert()
609 bucket = hash_long(key, cache->c_bucket_bits); in mb_cache_entry_insert()
610 index_hash_p = &cache->c_index_hash[bucket]; in mb_cache_entry_insert()
664 unsigned int bucket; in mb_cache_entry_get() local
669 bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), in mb_cache_entry_get()
671 block_hash_p = &cache->c_block_hash[bucket]; in mb_cache_entry_get()
789 unsigned int bucket = hash_long(key, cache->c_bucket_bits); in mb_cache_entry_find_first() local
794 index_hash_p = &cache->c_index_hash[bucket]; in mb_cache_entry_find_first()
[all …]
Dseq_file.c1004 struct hlist_head *bucket = per_cpu_ptr(head, *cpu); in seq_hlist_next_percpu() local
1006 if (!hlist_empty(bucket)) in seq_hlist_next_percpu()
1007 return bucket->first; in seq_hlist_next_percpu()
/linux-4.4.14/net/atm/
Dproc.c69 int bucket; member
79 static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l) in __vcc_walk() argument
84 for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) { in __vcc_walk()
85 struct hlist_head *head = &vcc_hash[*bucket]; in __vcc_walk()
99 if (!sk && ++*bucket < VCC_HTABLE_SIZE) { in __vcc_walk()
100 sk = sk_head(&vcc_hash[*bucket]); in __vcc_walk()
111 return __vcc_walk(&state->sk, state->family, &state->bucket, l) ? in vcc_walk()
/linux-4.4.14/drivers/misc/vmw_vmci/
Dvmci_doorbell.c128 u32 bucket = VMCI_DOORBELL_HASH(idx); in dbell_index_table_find() local
131 hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], in dbell_index_table_find()
147 u32 bucket; in dbell_index_table_add() local
195 bucket = VMCI_DOORBELL_HASH(entry->idx); in dbell_index_table_add()
196 hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]); in dbell_index_table_add()
359 u32 bucket = VMCI_DOORBELL_HASH(notify_idx); in dbell_fire_entries() local
364 hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) { in dbell_fire_entries()
/linux-4.4.14/arch/tile/include/gxio/
Dmpipe.h732 unsigned int bucket,
764 unsigned int bucket,
777 int ring, int bucket, unsigned int count) in gxio_mpipe_credit() argument
792 offset.bucket = bucket; in gxio_mpipe_credit()
794 offset.bucket_enable = (bucket >= 0); in gxio_mpipe_credit()
978 unsigned int bucket,
Diorpc_mpipe.h96 int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket,
/linux-4.4.14/net/netfilter/
Dxt_hashlimit.c751 unsigned int *bucket; in dl_seq_start() local
757 bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC); in dl_seq_start()
758 if (!bucket) in dl_seq_start()
761 *bucket = *pos; in dl_seq_start()
762 return bucket; in dl_seq_start()
768 unsigned int *bucket = (unsigned int *)v; in dl_seq_next() local
770 *pos = ++(*bucket); in dl_seq_next()
775 return bucket; in dl_seq_next()
782 unsigned int *bucket = (unsigned int *)v; in dl_seq_stop() local
784 if (!IS_ERR(bucket)) in dl_seq_stop()
[all …]
Dnf_conntrack_standalone.c51 unsigned int bucket; member
61 for (st->bucket = 0; in ct_get_first()
62 st->bucket < net->ct.htable_size; in ct_get_first()
63 st->bucket++) { in ct_get_first()
64 n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); in ct_get_first()
79 if (likely(get_nulls_value(head) == st->bucket)) { in ct_get_next()
80 if (++st->bucket >= net->ct.htable_size) in ct_get_next()
85 &net->ct.hash[st->bucket])); in ct_get_next()
Dnf_conntrack_expect.c466 unsigned int bucket; member
475 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { in ct_expect_get_first()
476 n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); in ct_expect_get_first()
491 if (++st->bucket >= nf_ct_expect_hsize) in ct_expect_get_next()
493 head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); in ct_expect_get_next()
Dxt_recent.c479 unsigned int bucket; member
492 for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++) in recent_seq_start()
493 list_for_each_entry(e, &t->iphash[st->bucket], list) in recent_seq_start()
506 while (head == &t->iphash[st->bucket]) { in recent_seq_next()
507 if (++st->bucket >= ip_list_hash_size) in recent_seq_next()
509 head = t->iphash[st->bucket].next; in recent_seq_next()
Dnf_conntrack_core.c453 unsigned int bucket = hash_bucket(hash, net); in ____nf_conntrack_find() local
460 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { in ____nf_conntrack_find()
473 if (get_nulls_value(n) != bucket) { in ____nf_conntrack_find()
1374 void *data, unsigned int *bucket) in get_next_corpse() argument
1382 for (; *bucket < net->ct.htable_size; (*bucket)++) { in get_next_corpse()
1383 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS]; in get_next_corpse()
1386 if (*bucket < net->ct.htable_size) { in get_next_corpse()
1387 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { in get_next_corpse()
1423 unsigned int bucket = 0; in nf_ct_iterate_cleanup() local
1425 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { in nf_ct_iterate_cleanup()
[all …]
Dnfnetlink_log.c958 unsigned int bucket; member
969 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { in get_first()
970 struct hlist_head *head = &log->instance_table[st->bucket]; in get_first()
986 if (++st->bucket >= INSTANCE_BUCKETS) in get_next()
990 head = &log->instance_table[st->bucket]; in get_next()
Dnfnetlink_queue.c1262 unsigned int bucket; member
1276 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { in get_first()
1277 if (!hlist_empty(&q->instance_table[st->bucket])) in get_first()
1278 return q->instance_table[st->bucket].first; in get_first()
1292 if (++st->bucket >= INSTANCE_BUCKETS) in get_next()
1296 h = q->instance_table[st->bucket].first; in get_next()
/linux-4.4.14/net/netfilter/ipvs/
Dip_vs_lblc.c107 struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ member
175 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); in ip_vs_lblc_hash()
188 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) in ip_vs_lblc_get()
242 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { in ip_vs_lblc_flush()
271 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblc_full_check()
327 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblc_check_expire()
366 INIT_HLIST_HEAD(&tbl->bucket[i]); in ip_vs_lblc_init_svc()
Dip_vs_lblcr.c277 struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ member
338 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); in ip_vs_lblcr_hash()
351 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) in ip_vs_lblcr_get()
408 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) { in ip_vs_lblcr_flush()
436 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblcr_full_check()
491 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) { in ip_vs_lblcr_check_expire()
529 INIT_HLIST_HEAD(&tbl->bucket[i]); in ip_vs_lblcr_init_svc()
Dip_vs_ctl.c1871 int bucket; member
1907 iter->bucket = idx; in ip_vs_info_array()
1919 iter->bucket = idx; in ip_vs_info_array()
1955 while (++iter->bucket < IP_VS_SVC_TAB_SIZE) { in ip_vs_info_seq_next()
1957 &ip_vs_svc_table[iter->bucket], in ip_vs_info_seq_next()
1964 iter->bucket = -1; in ip_vs_info_seq_next()
1974 while (++iter->bucket < IP_VS_SVC_TAB_SIZE) { in ip_vs_info_seq_next()
1976 &ip_vs_svc_fwm_table[iter->bucket], in ip_vs_info_seq_next()
/linux-4.4.14/net/mac80211/
Dmesh_pathtbl.c339 struct hlist_head *bucket; in mpath_lookup() local
342 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; in mpath_lookup()
343 hlist_for_each_entry_rcu(node, bucket, list) { in mpath_lookup()
534 struct hlist_head *bucket; in mesh_path_add() local
553 bucket = &tbl->hash_buckets[hash_idx]; in mesh_path_add()
557 hlist_for_each_entry(node, bucket, list) { in mesh_path_add()
586 hlist_add_head_rcu(&new_node->list, bucket); in mesh_path_add()
668 struct hlist_head *bucket; in mpp_path_add() local
703 bucket = &tbl->hash_buckets[hash_idx]; in mpp_path_add()
708 hlist_for_each_entry(node, bucket, list) { in mpp_path_add()
[all …]
Dmesh.c180 INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i]); in mesh_rmc_init()
194 list_for_each_entry_safe(p, n, &rmc->bucket[i], list) { in mesh_rmc_free()
229 list_for_each_entry_safe(p, n, &rmc->bucket[idx], list) { in mesh_rmc_check()
247 list_add(&p->list, &rmc->bucket[idx]); in mesh_rmc_check()
Dmesh.h189 struct list_head bucket[RMC_BUCKETS]; member
/linux-4.4.14/net/openvswitch/
Dvport.c109 struct hlist_head *bucket = hash_bucket(net, name); in ovs_vport_locate() local
112 hlist_for_each_entry_rcu(vport, bucket, hash_node) in ovs_vport_locate()
207 struct hlist_head *bucket; in ovs_vport_add() local
218 bucket = hash_bucket(ovs_dp_get_net(vport->dp), in ovs_vport_add()
220 hlist_add_head_rcu(&vport->hash_node, bucket); in ovs_vport_add()
Dflow_table.h75 u32 *bucket, u32 *idx);
Dflow_table.c292 u32 *bucket, u32 *last) in ovs_flow_tbl_dump_next() argument
300 while (*bucket < ti->n_buckets) { in ovs_flow_tbl_dump_next()
302 head = flex_array_get(ti->buckets, *bucket); in ovs_flow_tbl_dump_next()
311 (*bucket)++; in ovs_flow_tbl_dump_next()
Ddatapath.c1361 u32 bucket, obj; in ovs_flow_cmd_dump() local
1363 bucket = cb->args[0]; in ovs_flow_cmd_dump()
1365 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj); in ovs_flow_cmd_dump()
1375 cb->args[0] = bucket; in ovs_flow_cmd_dump()
2115 int bucket = cb->args[0], skip = cb->args[1]; in ovs_vport_cmd_dump() local
2124 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { in ovs_vport_cmd_dump()
/linux-4.4.14/security/keys/
Dkeyring.c63 unsigned bucket = 0; in keyring_hash() local
66 bucket += (unsigned char)*desc; in keyring_hash()
68 return bucket & (KEYRING_NAME_HASH_SIZE - 1); in keyring_hash()
111 int bucket; in keyring_publish_name() local
114 bucket = keyring_hash(keyring->description); in keyring_publish_name()
118 if (!keyring_name_hash[bucket].next) in keyring_publish_name()
119 INIT_LIST_HEAD(&keyring_name_hash[bucket]); in keyring_publish_name()
122 &keyring_name_hash[bucket]); in keyring_publish_name()
979 int bucket; in find_keyring_by_name() local
984 bucket = keyring_hash(name); in find_keyring_by_name()
[all …]
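
keyring_hash() is deliberately trivial: it sums the description bytes and masks the total down to the table size, which is valid only because KEYRING_NAME_HASH_SIZE is a power of two. Standing alone (the size below is illustrative):

    #define KEYRING_NAME_HASH_SIZE 32   /* illustrative; a power of two */

    /* Additive byte hash, masked into the bucket range (per keyring.c). */
    static unsigned int keyring_hash(const char *desc)
    {
        unsigned int bucket = 0;

        for (; *desc; desc++)
            bucket += (unsigned char)*desc;

        return bucket & (KEYRING_NAME_HASH_SIZE - 1);
    }
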
/linux-4.4.14/arch/hexagon/kernel/
Dptrace.c112 unsigned long bucket; in genregs_set() local
142 INEXT(&bucket, cause); in genregs_set()
143 INEXT(&bucket, badva); in genregs_set()
/linux-4.4.14/fs/xfs/
Dxfs_fsops.c156 int bucket; in xfs_growfs_data_private() local
280 for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++) in xfs_growfs_data_private()
281 agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK); in xfs_growfs_data_private()
317 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) in xfs_growfs_data_private()
318 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); in xfs_growfs_data_private()
Dxfs_log_recover.c1583 struct list_head *bucket; in xlog_recover_buffer_pass1() local
1598 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno); in xlog_recover_buffer_pass1()
1599 list_for_each_entry(bcp, bucket, bc_list) { in xlog_recover_buffer_pass1()
1612 list_add_tail(&bcp->bc_list, bucket); in xlog_recover_buffer_pass1()
1630 struct list_head *bucket; in xlog_peek_buffer_cancelled() local
1639 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno); in xlog_peek_buffer_cancelled()
1640 list_for_each_entry(bcp, bucket, bc_list) { in xlog_peek_buffer_cancelled()
3962 int bucket) in xlog_recover_clear_agi_bucket() argument
3980 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); in xlog_recover_clear_agi_bucket()
3982 (sizeof(xfs_agino_t) * bucket); in xlog_recover_clear_agi_bucket()
[all …]
/linux-4.4.14/Documentation/vDSO/
Dparse_vdso.c72 ELF(Word) *bucket, *chain;
177 vdso_info.bucket = &hash[2]; in vdso_init_from_sysinfo_ehdr()
229 ELF(Word) chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket]; in vdso_sym()
/linux-4.4.14/arch/tile/include/arch/
Dmpipe.h43 uint_reg_t bucket : 13; member
66 uint_reg_t bucket : 13;
/linux-4.4.14/net/core/
Dnet-procfs.c34 unsigned int bucket; in dev_from_bucket() local
41 bucket = get_bucket(*pos) + 1; in dev_from_bucket()
42 *pos = set_bucket_offset(bucket, 1); in dev_from_bucket()
43 } while (bucket < NETDEV_HASHENTRIES); in dev_from_bucket()
Dneighbour.c2501 int bucket = state->bucket; in neigh_get_first() local
2504 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) { in neigh_get_first()
2505 n = rcu_dereference_bh(nht->hash_buckets[bucket]); in neigh_get_first()
2529 state->bucket = bucket; in neigh_get_first()
2571 if (++state->bucket >= (1 << nht->hash_shift)) in neigh_get_next()
2574 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]); in neigh_get_next()
2603 int bucket = state->bucket; in pneigh_get_first() local
2606 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) { in pneigh_get_first()
2607 pn = tbl->phash_buckets[bucket]; in pneigh_get_first()
2613 state->bucket = bucket; in pneigh_get_first()
[all …]
/linux-4.4.14/net/llc/
Dllc_proc.c67 static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket) in laddr_hash_next() argument
72 while (++bucket < LLC_SK_LADDR_HASH_ENTRIES) in laddr_hash_next()
73 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket]) in laddr_hash_next()
/linux-4.4.14/net/ipv4/
Dtcp_ipv4.c1845 ilb = &tcp_hashinfo.listening_hash[st->bucket]; in listening_get_next()
1851 ilb = &tcp_hashinfo.listening_hash[st->bucket]; in listening_get_next()
1868 if (++st->bucket < INET_LHTABLE_SIZE) { in listening_get_next()
1869 ilb = &tcp_hashinfo.listening_hash[st->bucket]; in listening_get_next()
1884 st->bucket = 0; in listening_get_idx()
1897 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain); in empty_bucket()
1911 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { in established_get_first()
1914 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); in established_get_first()
1921 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { in established_get_first()
1952 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); in established_get_next()
[all …]
Dping.c1022 for (state->bucket = start; state->bucket < PING_HTABLE_SIZE; in ping_get_first()
1023 ++state->bucket) { in ping_get_first()
1027 hslot = &ping_table.hash[state->bucket]; in ping_get_first()
1053 return ping_get_first(seq, state->bucket + 1); in ping_get_next()
1070 state->bucket = 0; in ping_seq_start()
1105 int bucket) in ping_v4_format_sock() argument
1115 bucket, src, srcp, dest, destp, sp->sk_state, in ping_v4_format_sock()
1135 ping_v4_format_sock(v, seq, state->bucket); in ping_v4_seq_show()
Draw.c951 for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE; in raw_get_first()
952 ++state->bucket) { in raw_get_first()
953 sk_for_each(sk, &state->h->ht[state->bucket]) in raw_get_first()
972 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) { in raw_get_next()
973 sk = sk_head(&state->h->ht[state->bucket]); in raw_get_next()
1045 raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket); in raw_seq_show()
Dudp.c2308 for (state->bucket = start; state->bucket <= state->udp_table->mask; in udp_get_first()
2309 ++state->bucket) { in udp_get_first()
2311 struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; in udp_get_first()
2340 if (state->bucket <= state->udp_table->mask) in udp_get_next()
2341 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); in udp_get_next()
2342 return udp_get_first(seq, state->bucket + 1); in udp_get_next()
2360 state->bucket = MAX_UDP_PORTS; in udp_seq_start()
2382 if (state->bucket <= state->udp_table->mask) in udp_seq_stop()
2383 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); in udp_seq_stop()
2430 int bucket) in udp4_format_sock() argument
[all …]
/linux-4.4.14/net/ipv6/
Dping.c206 int bucket = ((struct ping_iter_state *) seq->private)->bucket; in ping_v6_seq_show() local
210 ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); in ping_v6_seq_show()
Dip6_flowlabel.c697 int bucket; member
708 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) { in ip6fl_get_first()
709 for_each_fl_rcu(state->bucket, fl) { in ip6fl_get_first()
730 if (++state->bucket <= FL_HASH_MASK) { in ip6fl_get_next()
731 for_each_fl_rcu(state->bucket, fl) { in ip6fl_get_next()
Dudp.c1472 int bucket = ((struct udp_iter_state *)seq->private)->bucket; in udp6_seq_show() local
1476 ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); in udp6_seq_show()
Ddatagram.c963 __u16 srcp, __u16 destp, int bucket) in ip6_dgram_sock_seq_show() argument
972 bucket, in ip6_dgram_sock_seq_show()
Daddrconf.c3749 int bucket; member
3762 state->bucket = 0; in if6_get_first()
3766 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { in if6_get_first()
3767 hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket], in if6_get_first()
3800 while (++state->bucket < IN6_ADDR_HSIZE) { in if6_get_next()
3803 &inet6_addr_lst[state->bucket], addr_lst) { in if6_get_next()
Draw.c1246 raw_seq_private(seq)->bucket); in raw6_seq_show()
/linux-4.4.14/arch/tile/gxio/
Dmpipe.c174 unsigned int bucket, in gxio_mpipe_init_notif_group_and_buckets() argument
199 result = gxio_mpipe_init_bucket(context, bucket + i, in gxio_mpipe_init_notif_group_and_buckets()
233 unsigned int bucket, unsigned int num_buckets, in gxio_mpipe_rules_begin() argument
281 rule->bucket_first = bucket; in gxio_mpipe_rules_begin()
Diorpc_mpipe.c243 unsigned int bucket; member
247 int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context, unsigned int bucket, in gxio_mpipe_init_bucket() argument
253 params->bucket = bucket; in gxio_mpipe_init_bucket()
/linux-4.4.14/net/batman-adv/
Dfragmentation.c151 u8 bucket; in batadv_frag_insert_packet() local
164 bucket = seqno % BATADV_FRAG_BUFFER_COUNT; in batadv_frag_insert_packet()
177 chain = &orig_node->fragments[bucket]; in batadv_frag_insert_packet()
/linux-4.4.14/include/net/
Draw.h44 int bucket; member
Dtransp_v6.h48 __u16 srcp, __u16 destp, int bucket);
Dping.h51 int bucket; member
Dudp.h316 int bucket; member
Dneighbour.h370 unsigned int bucket; member
Dtcp.h1649 int bucket, offset, sbucket, num; member
/linux-4.4.14/drivers/staging/rdma/hfi1/
Dtrace.h331 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
332 TP_ARGS(qp, bucket),
336 __field(u32, bucket)
341 __entry->bucket = bucket;
347 __entry->bucket
352 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
353 TP_ARGS(qp, bucket));
356 TP_PROTO(struct hfi1_qp *qp, u32 bucket),
357 TP_ARGS(qp, bucket));
/linux-4.4.14/drivers/md/
Ddm-cache-policy-cleaner.c145 struct hlist_head *bucket = &hash->table[h]; in lookup_cache_entry() local
147 hlist_for_each_entry(cur, bucket, hlist) { in lookup_cache_entry()
151 hlist_add_head(&cur->hlist, bucket); in lookup_cache_entry()
Ddm-cache-policy-smq.c586 static struct entry *h_head(struct hash_table *ht, unsigned bucket) in h_head() argument
588 return to_entry(ht->es, ht->buckets[bucket]); in h_head()
596 static void __h_insert(struct hash_table *ht, unsigned bucket, struct entry *e) in __h_insert() argument
598 e->hash_next = ht->buckets[bucket]; in __h_insert()
599 ht->buckets[bucket] = to_index(ht->es, e); in __h_insert()
Ddm-region-hash.c272 struct list_head *bucket = rh->buckets + rh_hash(rh, region); in __rh_lookup() local
274 list_for_each_entry(reg, bucket, hash_list) in __rh_lookup()
Ddm-cache-policy-mq.c505 struct hlist_head *bucket = mq->table + h; in hash_lookup() local
508 hlist_for_each_entry(e, bucket, hlist) in hash_lookup()
511 hlist_add_head(&e->hlist, bucket); in hash_lookup()
/linux-4.4.14/drivers/infiniband/core/
Dfmr_pool.c119 struct hlist_head *bucket; in ib_fmr_cache_lookup() local
125 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); in ib_fmr_cache_lookup()
127 hlist_for_each_entry(fmr, bucket, cache_node) in ib_fmr_cache_lookup()
/linux-4.4.14/arch/mips/include/asm/netlogic/xlr/
Dfmn.h314 static inline int nlm_fmn_receive(int bucket, int *size, int *code, int *stid, in nlm_fmn_receive() argument
319 nlm_msgld(bucket); in nlm_fmn_receive()
/linux-4.4.14/net/appletalk/
Daarp.c911 int bucket; member
923 int ct = iter->bucket; in iter_next()
933 iter->bucket = ct; in iter_next()
960 iter->bucket = 0; in aarp_seq_start()
982 ++iter->bucket; in aarp_seq_next()
/linux-4.4.14/drivers/crypto/nx/
Dnx-842-pseries.c140 int bucket = fls(time); in ibm_nx842_incr_hist() local
142 if (bucket) in ibm_nx842_incr_hist()
143 bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); in ibm_nx842_incr_hist()
145 atomic64_inc(&times[bucket]); in ibm_nx842_incr_hist()
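
ibm_nx842_incr_hist() turns a measured time into a histogram slot with fls(): the bucket is the index of the highest set bit, giving power-of-two-wide latency bins clamped to the table size. A stand-alone version with a portable fls (the slot count is illustrative):

    #include <stdint.h>

    #define NX842_HIST_SLOTS 16         /* illustrative slot count */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Portable fls(): 1-based index of the highest set bit; 0 for 0. */
    static int fls32(uint32_t x)
    {
        int n = 0;

        while (x) {
            n++;
            x >>= 1;
        }
        return n;
    }

    /* Map a time measurement to a log2-width histogram bucket. */
    static int hist_bucket(uint32_t time)
    {
        int bucket = fls32(time);

        if (bucket)
            bucket = MIN(NX842_HIST_SLOTS - 1, bucket - 1);
        return bucket;
    }
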
/linux-4.4.14/Documentation/filesystems/
Domfs.txt68 __be64 i_sibling; /* next inode in hash bucket */
79 hashed then prepended into the bucket list beginning at OMFS_DIR_START.
Dlogfs.txt194 collisions necessary to overflow a bucket, but testing showed that in
195 10,000 runs the lowest directory fill before a bucket overflow was
197 directory sizes of up to a million, bucket overflows should be
Df2fs.txt474 "A(2B)" means a bucket includes 2 data blocks.
477 A : bucket
509 one bucket determined by the following equation, which shows O(log(# of files))
512 bucket number to scan in level #n = (hash value) % (# of buckets in level #n)
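
The f2fs lookup cost is O(log(# of files)) because each level has its own bucket count and only one bucket per level is scanned. The quoted equation as code; buckets_in_level() follows the per-level sizing described in f2fs.txt, but treat its constants as a sketch:

    #include <stdint.h>

    /* Per f2fs.txt: level #n holds 2^n buckets until half the max depth,
     * after which the count stays pinned. Constants are a sketch. */
    static uint32_t buckets_in_level(unsigned int n)
    {
        const unsigned int max_depth = 63;  /* MAX_DIR_HASH_DEPTH */

        if (n < max_depth / 2)
            return 1u << n;
        return 1u << (max_depth / 2 - 1);
    }

    /* bucket number to scan in level #n = (hash value) % (# of buckets) */
    static uint32_t bucket_to_scan(uint32_t hash, unsigned int level)
    {
        return hash % buckets_in_level(level);
    }
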
Dpath-lookup.txt80 and use that to select a bucket in the dcache-hash table. The list of entries
81 in that bucket is then walked, and we do a full comparison of each entry
110 it in the new hash bucket. So what is done is to insert the dentry into the
/linux-4.4.14/arch/x86/kernel/
Dapm_32.c913 unsigned int bucket; in apm_cpu_idle() local
933 bucket = IDLE_LEAKY_MAX; in apm_cpu_idle()
944 if (bucket) { in apm_cpu_idle()
945 bucket = IDLE_LEAKY_MAX; in apm_cpu_idle()
948 } else if (bucket) { in apm_cpu_idle()
949 bucket--; in apm_cpu_idle()
/linux-4.4.14/fs/ocfs2/dlm/
Ddlmdomain.c171 struct hlist_head *bucket; in __dlm_insert_lockres() local
177 bucket = dlm_lockres_hash(dlm, q->hash); in __dlm_insert_lockres()
182 hlist_add_head(&res->hash_node, bucket); in __dlm_insert_lockres()
193 struct hlist_head *bucket; in __dlm_lookup_lockres_full() local
200 bucket = dlm_lockres_hash(dlm, hash); in __dlm_lookup_lockres_full()
202 hlist_for_each_entry(res, bucket, hash_node) { in __dlm_lookup_lockres_full()
425 struct hlist_head *bucket; in dlm_migrate_all_locks() local
435 bucket = dlm_lockres_hash(dlm, i); in dlm_migrate_all_locks()
436 iter = bucket->first; in dlm_migrate_all_locks()
Ddlmmaster.c330 struct hlist_head *bucket; in __dlm_insert_mle() local
334 bucket = dlm_master_hash(dlm, mle->mnamehash); in __dlm_insert_mle()
335 hlist_add_head(&mle->master_hash_node, bucket); in __dlm_insert_mle()
344 struct hlist_head *bucket; in dlm_find_mle() local
350 bucket = dlm_master_hash(dlm, hash); in dlm_find_mle()
351 hlist_for_each_entry(tmpmle, bucket, master_hash_node) { in dlm_find_mle()
3264 struct hlist_head *bucket; in dlm_clean_master_list() local
3275 bucket = dlm_master_hash(dlm, i); in dlm_clean_master_list()
3276 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { in dlm_clean_master_list()
3464 struct hlist_head *bucket; in dlm_force_free_mles() local
[all …]
Ddlmdebug.c444 struct hlist_head *bucket; in debug_mle_print() local
453 bucket = dlm_master_hash(dlm, i); in debug_mle_print()
454 hlist_for_each_entry(mle, bucket, master_hash_node) { in debug_mle_print()
Ddlmrecovery.c2127 struct hlist_head *bucket; in dlm_finish_local_lockres_recovery() local
2156 bucket = dlm_lockres_hash(dlm, i); in dlm_finish_local_lockres_recovery()
2157 hlist_for_each_entry(res, bucket, hash_node) { in dlm_finish_local_lockres_recovery()
2318 struct hlist_head *bucket; in dlm_do_local_recovery_cleanup() local
2340 bucket = dlm_lockres_hash(dlm, i); in dlm_do_local_recovery_cleanup()
2341 hlist_for_each_entry(res, bucket, hash_node) { in dlm_do_local_recovery_cleanup()
/linux-4.4.14/drivers/gpu/drm/radeon/
Dradeon_cs.c42 struct list_head bucket[RADEON_CS_NUM_BUCKETS]; member
50 INIT_LIST_HEAD(&b->bucket[i]); in radeon_cs_buckets_init()
61 list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]); in radeon_cs_buckets_add()
71 list_splice(&b->bucket[i], out_list); in radeon_cs_buckets_get_list()
/linux-4.4.14/drivers/atm/
Dhorizon.c2280 unsigned int bucket; in hrz_open()
2320 bucket = mbs*(pcr-scr)/pcr; in hrz_open()
2321 if (bucket*pcr != mbs*(pcr-scr)) in hrz_open()
2322 bucket += 1; in hrz_open()
2323 if (bucket > BUCKET_MAX_SIZE) { in hrz_open()
2325 bucket, BUCKET_MAX_SIZE); in hrz_open()
2326 bucket = BUCKET_MAX_SIZE; in hrz_open()
2329 vcc.tx_bucket_bits = bucket; in hrz_open()
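
hrz_open() sizes the transmit leaky bucket as ceil(mbs * (pcr - scr) / pcr): the truncating integer division is rounded up by checking whether it lost a remainder, then clamped to the hardware maximum. The arithmetic on its own (the clamp value is illustrative):

    #define BUCKET_MAX_SIZE 255u        /* illustrative hardware clamp */

    /* Bucket depth for an MBS burst at peak rate pcr with sustained
     * rate scr: ceil(mbs * (pcr - scr) / pcr), clamped. */
    static unsigned int tx_bucket_size(unsigned int mbs, unsigned int pcr,
                                       unsigned int scr)
    {
        unsigned int bucket = mbs * (pcr - scr) / pcr;

        if (bucket * pcr != mbs * (pcr - scr))  /* division truncated */
            bucket += 1;
        if (bucket > BUCKET_MAX_SIZE)
            bucket = BUCKET_MAX_SIZE;
        return bucket;
    }
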
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
Den_netdev.c636 struct hlist_head *bucket; in mlx4_en_replace_mac() local
642 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; in mlx4_en_replace_mac()
643 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { in mlx4_en_replace_mac()
1054 struct hlist_head *bucket; in mlx4_en_do_uc_filter() local
1065 bucket = &priv->mac_hash[i]; in mlx4_en_do_uc_filter()
1066 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { in mlx4_en_do_uc_filter()
1109 bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; in mlx4_en_do_uc_filter()
1110 hlist_for_each_entry(entry, bucket, hlist) { in mlx4_en_do_uc_filter()
1150 bucket = &priv->mac_hash[mac_hash]; in mlx4_en_do_uc_filter()
1151 hlist_add_head_rcu(&entry->hlist, bucket); in mlx4_en_do_uc_filter()
[all …]
Den_rx.c816 struct hlist_head *bucket; in mlx4_en_process_rx_cq() local
821 bucket = &priv->mac_hash[mac_hash]; in mlx4_en_process_rx_cq()
823 hlist_for_each_entry_rcu(entry, bucket, hlist) { in mlx4_en_process_rx_cq()
/linux-4.4.14/net/decnet/
Ddn_route.c1780 int bucket; member
1788 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) { in dn_rt_cache_get_first()
1790 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); in dn_rt_cache_get_first()
1805 if (--s->bucket < 0) in dn_rt_cache_get_next()
1808 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); in dn_rt_cache_get_next()
Daf_decnet.c2117 int bucket; member
2125 for(state->bucket = 0; in dn_socket_get_first()
2126 state->bucket < DN_SK_HASH_SIZE; in dn_socket_get_first()
2127 ++state->bucket) { in dn_socket_get_first()
2128 n = sk_head(&dn_sk_hash[state->bucket]); in dn_socket_get_first()
2145 if (++state->bucket >= DN_SK_HASH_SIZE) in dn_socket_get_next()
2147 n = sk_head(&dn_sk_hash[state->bucket]); in dn_socket_get_next()
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
Damdgpu_cs.c41 struct list_head bucket[AMDGPU_CS_NUM_BUCKETS]; member
49 INIT_LIST_HEAD(&b->bucket[i]); in amdgpu_cs_buckets_init()
60 list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]); in amdgpu_cs_buckets_add()
70 list_splice(&b->bucket[i], out_list); in amdgpu_cs_buckets_get_list()
/linux-4.4.14/fs/omfs/
Ddir.c28 int bucket = omfs_hash(name, namelen, nbuckets); in omfs_get_bucket() local
30 *ofs = OMFS_DIR_START + bucket * 8; in omfs_get_bucket()
/linux-4.4.14/fs/btrfs/
Draid56.c350 int bucket = rbio_bucket(rbio); in __remove_rbio_from_cache() local
362 h = table->table + bucket; in __remove_rbio_from_cache()
660 int bucket = rbio_bucket(rbio); in lock_stripe_add() local
661 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket; in lock_stripe_add()
749 int bucket; in unlock_stripe() local
754 bucket = rbio_bucket(rbio); in unlock_stripe()
755 h = rbio->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
/linux-4.4.14/net/unix/
Daf_unix.c2735 unsigned long bucket = get_bucket(*pos); in unix_from_bucket() local
2739 for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) { in unix_from_bucket()
2753 unsigned long bucket; in unix_next_socket() local
2769 bucket = get_bucket(*pos) + 1; in unix_next_socket()
2770 *pos = set_bucket_offset(bucket, 1); in unix_next_socket()
2771 } while (bucket < ARRAY_SIZE(unix_socket_table)); in unix_next_socket()
/linux-4.4.14/drivers/media/v4l2-core/
Dv4l2-ctrls.c1781 int bucket; in find_ref() local
1788 bucket = id % hdl->nr_of_buckets; in find_ref()
1795 ref = hdl->buckets ? hdl->buckets[bucket] : NULL; in find_ref()
1835 int bucket = id % hdl->nr_of_buckets; /* which bucket to use */ in handler_new_ref() local
1890 new_ref->next = hdl->buckets[bucket]; in handler_new_ref()
1891 hdl->buckets[bucket] = new_ref; in handler_new_ref()
/linux-4.4.14/Documentation/
Dbcache.txt11 extents (which can be anywhere from a single sector to the bucket size). It's
393 Counts instances where while data was being read from the cache, the bucket
417 Boolean; if on a discard/TRIM will be issued to each bucket before it is
Drbtree.txt30 packets in the "hierarchical token bucket" scheduler.
Dkernel-parameters.txt3014 Param: <number> - step/bucket size as a power of 2 for
/linux-4.4.14/Documentation/locking/
Dlockdep-design.txt248 a hash table with 8192 buckets where each bucket has its own
253 the per-bucket spinlocks would guarantee lock-class overflow.
/linux-4.4.14/Documentation/ABI/testing/
Dsysfs-block-bcache120 For a cache, bucket size in human readable units, as set at
/linux-4.4.14/net/netfilter/ipset/
Dip_set_hash_gen.h84 struct hbucket __rcu *bucket[0]; /* hashtable buckets */ member
87 #define hbucket(h, i) ((h)->bucket[i])
/linux-4.4.14/Documentation/networking/
Dscaling.txt201 bucket and incrementing a per-bucket counter. The hash function is
Dip-sysctl.txt758 hash bucket containing a number of cache entries. This variable limits
759 the number of entries in each hash bucket; the larger the value the
761 entries in a given hash bucket reaches this limit adding new entries
762 causes the oldest entry in the bucket to be removed to make room.
/linux-4.4.14/drivers/net/
DKconfig61 This is essentially a bit-bucket device (i.e. traffic you send to
/linux-4.4.14/drivers/block/
Dcciss.c213 static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
3915 static void calc_bucket_map(int bucket[], int num_buckets, in calc_bucket_map() argument
3930 if (bucket[j] >= size) { in calc_bucket_map()
/linux-4.4.14/drivers/message/fusion/lsi/
Dmpi_history.txt657 * Changed transaction context usage to bucket/buffer.
/linux-4.4.14/drivers/scsi/
Dhpsa.c267 static void calc_bucket_map(int *bucket, int num_buckets,
8802 static void calc_bucket_map(int bucket[], int num_buckets, in calc_bucket_map() argument
8814 if (bucket[j] >= size) { in calc_bucket_map()
/linux-4.4.14/
DCREDITS2153 E: beans@bucket.ualr.edu