bucket             98 arch/hexagon/kernel/ptrace.c 	unsigned long bucket;
bucket            128 arch/hexagon/kernel/ptrace.c 	INEXT(&bucket, cause);
bucket            129 arch/hexagon/kernel/ptrace.c 	INEXT(&bucket, badva);
bucket            312 arch/mips/include/asm/netlogic/xlr/fmn.h static inline int nlm_fmn_receive(int bucket, int *size, int *code, int *stid,
bucket            317 arch/mips/include/asm/netlogic/xlr/fmn.h 	nlm_msgld(bucket);
bucket             72 arch/mips/netlogic/xlr/fmn.c 	int bucket, rv;
bucket             86 arch/mips/netlogic/xlr/fmn.c 		for (bucket = 0; bucket < 8; bucket++) {
bucket             88 arch/mips/netlogic/xlr/fmn.c 			if (bkt_status & (1 << bucket))
bucket             90 arch/mips/netlogic/xlr/fmn.c 			rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid,
bucket            101 arch/mips/netlogic/xlr/fmn.c 				hndlr->action(bucket, src_stnid, size, code,
bucket            206 arch/sparc/kernel/irq_64.c 	struct ino_bucket bucket;
bucket            257 arch/sparc/kernel/irq_64.c 	struct ino_bucket *bucket;
bucket            268 arch/sparc/kernel/irq_64.c 		bucket = (struct ino_bucket *) __va(cookie);
bucket            269 arch/sparc/kernel/irq_64.c 		irq = bucket->__irq;
bucket            278 arch/sparc/kernel/irq_64.c 	struct ino_bucket *bucket;
bucket            281 arch/sparc/kernel/irq_64.c 	bucket = &ivector_table[sysino];
bucket            282 arch/sparc/kernel/irq_64.c 	irq = bucket_get_irq(__pa(bucket));
bucket            615 arch/sparc/kernel/irq_64.c 	struct ino_bucket *bucket;
bucket            622 arch/sparc/kernel/irq_64.c 	bucket = &ivector_table[ino];
bucket            623 arch/sparc/kernel/irq_64.c 	irq = bucket_get_irq(__pa(bucket));
bucket            626 arch/sparc/kernel/irq_64.c 		bucket_set_irq(__pa(bucket), irq);
bucket            687 arch/sparc/kernel/irq_64.c 	ihd->bucket.__irq = irq;
bucket            688 arch/sparc/kernel/irq_64.c 	cookie = ~__pa(&ihd->bucket);
bucket            738 arch/sparc/kernel/irq_64.c 	struct ino_bucket *bucket;
bucket            743 arch/sparc/kernel/irq_64.c 	bucket = &ivector_table[sysino];
bucket            744 arch/sparc/kernel/irq_64.c 	bucket_set_irq(__pa(bucket), irq);
bucket            910 arch/x86/kernel/apm_32.c 	unsigned int bucket;
bucket            930 arch/x86/kernel/apm_32.c 	bucket = IDLE_LEAKY_MAX;
bucket            941 arch/x86/kernel/apm_32.c 					if (bucket) {
bucket            942 arch/x86/kernel/apm_32.c 						bucket = IDLE_LEAKY_MAX;
bucket            945 arch/x86/kernel/apm_32.c 				} else if (bucket) {
bucket            946 arch/x86/kernel/apm_32.c 					bucket--;
bucket             30 block/blk-mq-debugfs.c 	int bucket;
bucket             32 block/blk-mq-debugfs.c 	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
bucket             33 block/blk-mq-debugfs.c 		seq_printf(m, "read  (%d Bytes): ", 1 << (9 + bucket));
bucket             34 block/blk-mq-debugfs.c 		print_stat(m, &q->poll_stat[2 * bucket]);
bucket             37 block/blk-mq-debugfs.c 		seq_printf(m, "write (%d Bytes): ",  1 << (9 + bucket));
bucket             38 block/blk-mq-debugfs.c 		print_stat(m, &q->poll_stat[2 * bucket + 1]);
bucket             48 block/blk-mq.c 	int ddir, sectors, bucket;
bucket             53 block/blk-mq.c 	bucket = ddir + 2 * ilog2(sectors);
bucket             55 block/blk-mq.c 	if (bucket < 0)
bucket             57 block/blk-mq.c 	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
bucket             60 block/blk-mq.c 	return bucket;
bucket           3368 block/blk-mq.c 	int bucket;
bucket           3370 block/blk-mq.c 	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
bucket           3371 block/blk-mq.c 		if (cb->stat[bucket].nr_samples)
bucket           3372 block/blk-mq.c 			q->poll_stat[bucket] = cb->stat[bucket];
bucket           3381 block/blk-mq.c 	int bucket;
bucket           3399 block/blk-mq.c 	bucket = blk_mq_poll_stats_bkt(rq);
bucket           3400 block/blk-mq.c 	if (bucket < 0)
bucket           3403 block/blk-mq.c 	if (q->poll_stat[bucket].nr_samples)
bucket           3404 block/blk-mq.c 		ret = (q->poll_stat[bucket].mean + 1) / 2;
bucket             56 block/blk-stat.c 	int bucket;
bucket             68 block/blk-stat.c 		bucket = cb->bucket_fn(rq);
bucket             69 block/blk-stat.c 		if (bucket < 0)
bucket             72 block/blk-stat.c 		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
bucket             82 block/blk-stat.c 	unsigned int bucket;
bucket             85 block/blk-stat.c 	for (bucket = 0; bucket < cb->buckets; bucket++)
bucket             86 block/blk-stat.c 		blk_rq_stat_init(&cb->stat[bucket]);
bucket             92 block/blk-stat.c 		for (bucket = 0; bucket < cb->buckets; bucket++) {
bucket             93 block/blk-stat.c 			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
bucket             94 block/blk-stat.c 			blk_rq_stat_init(&cpu_stat[bucket]);
bucket            138 block/blk-stat.c 	unsigned int bucket;
bucket            145 block/blk-stat.c 		for (bucket = 0; bucket < cb->buckets; bucket++)
bucket            146 block/blk-stat.c 			blk_rq_stat_init(&cpu_stat[bucket]);
bucket           2054 block/blk-throttle.c 				struct latency_bucket *bucket;
bucket           2057 block/blk-throttle.c 				bucket = per_cpu_ptr(td->latency_buckets[rw],
bucket           2059 block/blk-throttle.c 				tmp->total_latency += bucket[i].total_latency;
bucket           2060 block/blk-throttle.c 				tmp->samples += bucket[i].samples;
bucket           2061 block/blk-throttle.c 				bucket[i].total_latency = 0;
bucket           2062 block/blk-throttle.c 				bucket[i].samples = 0;
bucket           2285 block/blk-throttle.c 		int bucket;
bucket           2288 block/blk-throttle.c 		bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
bucket           2289 block/blk-throttle.c 		threshold = tg->td->avg_buckets[rw][bucket].latency +
bucket            215 block/kyber-iosched.c 	unsigned int bucket;
bucket            217 block/kyber-iosched.c 	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
bucket            218 block/kyber-iosched.c 		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
bucket            230 block/kyber-iosched.c 	unsigned int bucket, samples = 0, percentile_samples;
bucket            232 block/kyber-iosched.c 	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
bucket            233 block/kyber-iosched.c 		samples += buckets[bucket];
bucket            251 block/kyber-iosched.c 	for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
bucket            252 block/kyber-iosched.c 		if (buckets[bucket] >= percentile_samples)
bucket            254 block/kyber-iosched.c 		percentile_samples -= buckets[bucket];
bucket            260 block/kyber-iosched.c 			    bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
bucket            262 block/kyber-iosched.c 	return bucket;
bucket            621 block/kyber-iosched.c 	unsigned int bucket;
bucket            626 block/kyber-iosched.c 		bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
bucket            629 block/kyber-iosched.c 		bucket = 0;
bucket            632 block/kyber-iosched.c 	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
bucket           2266 drivers/atm/horizon.c 	unsigned int bucket;
bucket           2306 drivers/atm/horizon.c 	bucket = mbs*(pcr-scr)/pcr;
bucket           2307 drivers/atm/horizon.c 	if (bucket*pcr != mbs*(pcr-scr))
bucket           2308 drivers/atm/horizon.c 	  bucket += 1;
bucket           2309 drivers/atm/horizon.c 	if (bucket > BUCKET_MAX_SIZE) {
bucket           2311 drivers/atm/horizon.c 		  bucket, BUCKET_MAX_SIZE);
bucket           2312 drivers/atm/horizon.c 	  bucket = BUCKET_MAX_SIZE;
bucket           2315 drivers/atm/horizon.c 	vcc.tx_bucket_bits = bucket;
bucket            124 drivers/cpuidle/governors/menu.c 	unsigned int	bucket;
bucket            132 drivers/cpuidle/governors/menu.c 	int bucket = 0;
bucket            141 drivers/cpuidle/governors/menu.c 		bucket = BUCKETS/2;
bucket            144 drivers/cpuidle/governors/menu.c 		return bucket;
bucket            146 drivers/cpuidle/governors/menu.c 		return bucket + 1;
bucket            148 drivers/cpuidle/governors/menu.c 		return bucket + 2;
bucket            150 drivers/cpuidle/governors/menu.c 		return bucket + 3;
bucket            152 drivers/cpuidle/governors/menu.c 		return bucket + 4;
bucket            153 drivers/cpuidle/governors/menu.c 	return bucket + 5;
bucket            296 drivers/cpuidle/governors/menu.c 	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
bucket            317 drivers/cpuidle/governors/menu.c 					 data->correction_factor[data->bucket],
bucket            521 drivers/cpuidle/governors/menu.c 	new_factor = data->correction_factor[data->bucket];
bucket            542 drivers/cpuidle/governors/menu.c 	data->correction_factor[data->bucket] = new_factor;
bucket            127 drivers/crypto/nx/nx-842-pseries.c 	int bucket = fls(time);
bucket            129 drivers/crypto/nx/nx-842-pseries.c 	if (bucket)
bucket            130 drivers/crypto/nx/nx-842-pseries.c 		bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);
bucket            132 drivers/crypto/nx/nx-842-pseries.c 	atomic64_inc(&times[bucket]);
bucket            191 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
bucket            196 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 		INIT_LIST_HEAD(&bucket[i]);
bucket            208 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 			list_add_tail(&e->tv.head, &bucket[priority]);
bucket            215 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c 		list_splice(&bucket[i], validated);
bucket             48 drivers/gpu/drm/radeon/radeon_cs.c 	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
bucket             56 drivers/gpu/drm/radeon/radeon_cs.c 		INIT_LIST_HEAD(&b->bucket[i]);
bucket             67 drivers/gpu/drm/radeon/radeon_cs.c 	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
bucket             77 drivers/gpu/drm/radeon/radeon_cs.c 		list_splice(&b->bucket[i], out_list);
bucket            120 drivers/infiniband/core/fmr_pool.c 	struct hlist_head *bucket;
bucket            126 drivers/infiniband/core/fmr_pool.c 	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
bucket            128 drivers/infiniband/core/fmr_pool.c 	hlist_for_each_entry(fmr, bucket, cache_node)
bucket             60 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_PROTO(struct rvt_qp *qp, u32 bucket),
bucket             61 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_ARGS(qp, bucket),
bucket             65 drivers/infiniband/sw/rdmavt/trace_qp.h 		__field(u32, bucket)
bucket             70 drivers/infiniband/sw/rdmavt/trace_qp.h 		__entry->bucket = bucket;
bucket             76 drivers/infiniband/sw/rdmavt/trace_qp.h 		__entry->bucket
bucket             81 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_PROTO(struct rvt_qp *qp, u32 bucket),
bucket             82 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_ARGS(qp, bucket));
bucket             85 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_PROTO(struct rvt_qp *qp, u32 bucket),
bucket             86 drivers/infiniband/sw/rdmavt/trace_qp.h 	TP_ARGS(qp, bucket));
bucket            557 drivers/interconnect/qcom/sdm845.c static void tcs_list_gen(struct list_head *bcm_list, int bucket,
bucket            575 drivers/interconnect/qcom/sdm845.c 		tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
bucket            576 drivers/interconnect/qcom/sdm845.c 			    bcm->vote_y[bucket], bcm->addr, commit);
bucket            597 drivers/interconnect/qcom/sdm845.c 	size_t i, bucket;
bucket            602 drivers/interconnect/qcom/sdm845.c 	for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
bucket            604 drivers/interconnect/qcom/sdm845.c 			temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
bucket            606 drivers/interconnect/qcom/sdm845.c 			agg_avg[bucket] = max(agg_avg[bucket], temp);
bucket            608 drivers/interconnect/qcom/sdm845.c 			temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
bucket            610 drivers/interconnect/qcom/sdm845.c 			agg_peak[bucket] = max(agg_peak[bucket], temp);
bucket            613 drivers/interconnect/qcom/sdm845.c 		temp = agg_avg[bucket] * 1000ULL;
bucket            615 drivers/interconnect/qcom/sdm845.c 		bcm->vote_x[bucket] = temp;
bucket            617 drivers/interconnect/qcom/sdm845.c 		temp = agg_peak[bucket] * 1000ULL;
bucket            619 drivers/interconnect/qcom/sdm845.c 		bcm->vote_y[bucket] = temp;
bucket            383 drivers/lightnvm/pblk-sysfs.c static long long bucket_percentage(unsigned long long bucket,
bucket            386 drivers/lightnvm/pblk-sysfs.c 	int p = bucket * 100;
bucket             76 drivers/md/bcache/alloc.c uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
bucket             89 drivers/md/bcache/alloc.c 	struct bucket *b;
bucket            126 drivers/md/bcache/alloc.c static inline bool can_inc_bucket_gen(struct bucket *b)
bucket            131 drivers/md/bcache/alloc.c bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
bucket            141 drivers/md/bcache/alloc.c void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
bucket            154 drivers/md/bcache/alloc.c static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
bucket            182 drivers/md/bcache/alloc.c 	struct bucket *b;
bucket            219 drivers/md/bcache/alloc.c 	struct bucket *b;
bucket            242 drivers/md/bcache/alloc.c 	struct bucket *b;
bucket            303 drivers/md/bcache/alloc.c static int bch_allocator_push(struct cache *ca, long bucket)
bucket            308 drivers/md/bcache/alloc.c 	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
bucket            312 drivers/md/bcache/alloc.c 		if (fifo_push(&ca->free[i], bucket))
bucket            331 drivers/md/bcache/alloc.c 			long bucket;
bucket            333 drivers/md/bcache/alloc.c 			if (!fifo_pop(&ca->free_inc, bucket))
bucket            339 drivers/md/bcache/alloc.c 					bucket_to_sector(ca->set, bucket),
bucket            344 drivers/md/bcache/alloc.c 			allocator_wait(ca, bch_allocator_push(ca, bucket));
bucket            396 drivers/md/bcache/alloc.c 	struct bucket *b;
bucket            470 drivers/md/bcache/alloc.c void __bch_bucket_free(struct cache *ca, struct bucket *b)
bucket            210 drivers/md/bcache/bcache.h BITMASK(GC_MARK,	 struct bucket, gc_mark, 0, 2);
bucket            216 drivers/md/bcache/bcache.h BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
bucket            217 drivers/md/bcache/bcache.h BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
bucket            445 drivers/md/bcache/bcache.h 	struct bucket		*buckets;
bucket            447 drivers/md/bcache/bcache.h 	DECLARE_HEAP(struct bucket *, heap);
bucket            796 drivers/md/bcache/bcache.h static inline struct bucket *PTR_BUCKET(struct cache_set *c,
bucket            895 drivers/md/bcache/bcache.h static inline uint8_t bucket_gc_gen(struct bucket *b)
bucket            961 drivers/md/bcache/bcache.h uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
bucket            964 drivers/md/bcache/bcache.h bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
bucket            965 drivers/md/bcache/bcache.h void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
bucket            967 drivers/md/bcache/bcache.h void __bch_bucket_free(struct cache *ca, struct bucket *b);
bucket           1241 drivers/md/bcache/btree.c 	struct bucket *g;
bucket           1297 drivers/md/bcache/btree.c 			struct bucket *b = PTR_BUCKET(c, k, i);
bucket           1738 drivers/md/bcache/btree.c 	struct bucket *b;
bucket           1763 drivers/md/bcache/btree.c 	struct bucket *b;
bucket           1965 drivers/md/bcache/btree.c 	struct bucket *b;
bucket             54 drivers/md/bcache/extents.c 			size_t bucket = PTR_BUCKET_NR(c, k, i);
bucket             58 drivers/md/bcache/extents.c 			    bucket <  ca->sb.first_bucket ||
bucket             59 drivers/md/bcache/extents.c 			    bucket >= ca->sb.nbuckets)
bucket             75 drivers/md/bcache/extents.c 			size_t bucket = PTR_BUCKET_NR(c, k, i);
bucket             80 drivers/md/bcache/extents.c 			if (bucket <  ca->sb.first_bucket)
bucket             82 drivers/md/bcache/extents.c 			if (bucket >= ca->sb.nbuckets)
bucket            177 drivers/md/bcache/extents.c 	struct bucket *g;
bucket            510 drivers/md/bcache/extents.c 	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
bucket             46 drivers/md/bcache/journal.c 	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
bucket             57 drivers/md/bcache/journal.c 		bio->bi_iter.bi_sector	= bucket + offset;
bucket            185 drivers/md/bcache/movinggc.c static bool bucket_cmp(struct bucket *l, struct bucket *r)
bucket            192 drivers/md/bcache/movinggc.c 	struct bucket *b;
bucket            200 drivers/md/bcache/movinggc.c 	struct bucket *b;
bucket            511 drivers/md/bcache/super.c static void prio_io(struct cache *ca, uint64_t bucket, int op,
bucket            519 drivers/md/bcache/super.c 	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
bucket            535 drivers/md/bcache/super.c 	struct bucket *b;
bucket            565 drivers/md/bcache/super.c 		long bucket;
bucket            581 drivers/md/bcache/super.c 		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
bucket            582 drivers/md/bcache/super.c 		BUG_ON(bucket == -1);
bucket            585 drivers/md/bcache/super.c 		prio_io(ca, bucket, REQ_OP_WRITE, 0);
bucket            588 drivers/md/bcache/super.c 		ca->prio_buckets[i] = bucket;
bucket            589 drivers/md/bcache/super.c 		atomic_dec_bug(&ca->buckets[bucket].pin);
bucket            613 drivers/md/bcache/super.c static void prio_read(struct cache *ca, uint64_t bucket)
bucket            617 drivers/md/bcache/super.c 	struct bucket *b;
bucket            624 drivers/md/bcache/super.c 			ca->prio_buckets[bucket_nr] = bucket;
bucket            625 drivers/md/bcache/super.c 			ca->prio_last_buckets[bucket_nr] = bucket;
bucket            628 drivers/md/bcache/super.c 			prio_io(ca, bucket, REQ_OP_READ, 0);
bucket            637 drivers/md/bcache/super.c 			bucket = p->next_bucket;
bucket           2153 drivers/md/bcache/super.c 	struct bucket *b;
bucket           2211 drivers/md/bcache/super.c 	ca->buckets = vzalloc(array_size(sizeof(struct bucket),
bucket           1017 drivers/md/bcache/sysfs.c 		struct bucket *b;
bucket            606 drivers/md/dm-cache-policy-smq.c static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
bucket            608 drivers/md/dm-cache-policy-smq.c 	return to_entry(ht->es, ht->buckets[bucket]);
bucket            616 drivers/md/dm-cache-policy-smq.c static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
bucket            618 drivers/md/dm-cache-policy-smq.c 	e->hash_next = ht->buckets[bucket];
bucket            619 drivers/md/dm-cache-policy-smq.c 	ht->buckets[bucket] = to_index(ht->es, e);
bucket            571 drivers/md/dm-clone-target.c #define bucket_lock_irqsave(bucket, flags) \
bucket            572 drivers/md/dm-clone-target.c 	spin_lock_irqsave(&(bucket)->lock, flags)
bucket            574 drivers/md/dm-clone-target.c #define bucket_unlock_irqrestore(bucket, flags) \
bucket            575 drivers/md/dm-clone-target.c 	spin_unlock_irqrestore(&(bucket)->lock, flags)
bucket            580 drivers/md/dm-clone-target.c 	struct hash_table_bucket *bucket;
bucket            589 drivers/md/dm-clone-target.c 		bucket = clone->ht + i;
bucket            591 drivers/md/dm-clone-target.c 		INIT_HLIST_HEAD(&bucket->head);
bucket            592 drivers/md/dm-clone-target.c 		spin_lock_init(&bucket->lock);
bucket            614 drivers/md/dm-clone-target.c static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
bucket            619 drivers/md/dm-clone-target.c 	hlist_for_each_entry(hd, &bucket->head, h) {
bucket            632 drivers/md/dm-clone-target.c static inline void __insert_region_hydration(struct hash_table_bucket *bucket,
bucket            635 drivers/md/dm-clone-target.c 	hlist_add_head(&hd->h, &bucket->head);
bucket            646 drivers/md/dm-clone-target.c __find_or_insert_region_hydration(struct hash_table_bucket *bucket,
bucket            651 drivers/md/dm-clone-target.c 	hd2 = __hash_find(bucket, hd->region_nr);
bucket            655 drivers/md/dm-clone-target.c 	__insert_region_hydration(bucket, hd);
bucket            704 drivers/md/dm-clone-target.c 	struct hash_table_bucket *bucket;
bucket            714 drivers/md/dm-clone-target.c 	bucket = get_hash_table_bucket(clone, hd->region_nr);
bucket            717 drivers/md/dm-clone-target.c 	bucket_lock_irqsave(bucket, flags);
bucket            719 drivers/md/dm-clone-target.c 	bucket_unlock_irqrestore(bucket, flags);
bucket            879 drivers/md/dm-clone-target.c 	struct hash_table_bucket *bucket;
bucket            883 drivers/md/dm-clone-target.c 	bucket = get_hash_table_bucket(clone, region_nr);
bucket            885 drivers/md/dm-clone-target.c 	bucket_lock_irqsave(bucket, flags);
bucket            887 drivers/md/dm-clone-target.c 	hd = __hash_find(bucket, region_nr);
bucket            891 drivers/md/dm-clone-target.c 		bucket_unlock_irqrestore(bucket, flags);
bucket            897 drivers/md/dm-clone-target.c 		bucket_unlock_irqrestore(bucket, flags);
bucket            906 drivers/md/dm-clone-target.c 	bucket_unlock_irqrestore(bucket, flags);
bucket            911 drivers/md/dm-clone-target.c 	bucket_lock_irqsave(bucket, flags);
bucket            915 drivers/md/dm-clone-target.c 		bucket_unlock_irqrestore(bucket, flags);
bucket            921 drivers/md/dm-clone-target.c 	hd2 = __find_or_insert_region_hydration(bucket, hd);
bucket            925 drivers/md/dm-clone-target.c 		bucket_unlock_irqrestore(bucket, flags);
bucket            937 drivers/md/dm-clone-target.c 		bucket_unlock_irqrestore(bucket, flags);
bucket            951 drivers/md/dm-clone-target.c 		bucket_unlock_irqrestore(bucket, flags);
bucket            955 drivers/md/dm-clone-target.c 		bucket_unlock_irqrestore(bucket, flags);
bucket           1023 drivers/md/dm-clone-target.c 	struct hash_table_bucket *bucket;
bucket           1035 drivers/md/dm-clone-target.c 		bucket = get_hash_table_bucket(clone, offset);
bucket           1036 drivers/md/dm-clone-target.c 		bucket_lock_irqsave(bucket, flags);
bucket           1039 drivers/md/dm-clone-target.c 		    !__hash_find(bucket, offset)) {
bucket           1041 drivers/md/dm-clone-target.c 			__insert_region_hydration(bucket, hd);
bucket           1042 drivers/md/dm-clone-target.c 			bucket_unlock_irqrestore(bucket, flags);
bucket           1050 drivers/md/dm-clone-target.c 		bucket_unlock_irqrestore(bucket, flags);
bucket            274 drivers/md/dm-region-hash.c 	struct list_head *bucket = rh->buckets + rh_hash(rh, region);
bucket            276 drivers/md/dm-region-hash.c 	list_for_each_entry(reg, bucket, hash_list)
bucket            106 drivers/md/persistent-data/dm-transaction-manager.c 	unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
bucket            110 drivers/md/persistent-data/dm-transaction-manager.c 	hlist_for_each_entry(si, tm->buckets + bucket, hlist)
bucket            126 drivers/md/persistent-data/dm-transaction-manager.c 	unsigned bucket;
bucket            132 drivers/md/persistent-data/dm-transaction-manager.c 		bucket = dm_hash_block(b, DM_HASH_MASK);
bucket            134 drivers/md/persistent-data/dm-transaction-manager.c 		hlist_add_head(&si->hlist, tm->buckets + bucket);
bucket            143 drivers/md/persistent-data/dm-transaction-manager.c 	struct hlist_head *bucket;
bucket            148 drivers/md/persistent-data/dm-transaction-manager.c 		bucket = tm->buckets + i;
bucket            149 drivers/md/persistent-data/dm-transaction-manager.c 		hlist_for_each_entry_safe(si, tmp, bucket, hlist)
bucket            152 drivers/md/persistent-data/dm-transaction-manager.c 		INIT_HLIST_HEAD(bucket);
bucket           2218 drivers/media/v4l2-core/v4l2-ctrls.c 	int bucket;
bucket           2225 drivers/media/v4l2-core/v4l2-ctrls.c 	bucket = id % hdl->nr_of_buckets;
bucket           2232 drivers/media/v4l2-core/v4l2-ctrls.c 	ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
bucket           2274 drivers/media/v4l2-core/v4l2-ctrls.c 	int bucket = id % hdl->nr_of_buckets;	/* which bucket to use */
bucket           2330 drivers/media/v4l2-core/v4l2-ctrls.c 	new_ref->next = hdl->buckets[bucket];
bucket           2331 drivers/media/v4l2-core/v4l2-ctrls.c 	hdl->buckets[bucket] = new_ref;
bucket            120 drivers/misc/vmw_vmci/vmci_doorbell.c 	u32 bucket = VMCI_DOORBELL_HASH(idx);
bucket            123 drivers/misc/vmw_vmci/vmci_doorbell.c 	hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket],
bucket            139 drivers/misc/vmw_vmci/vmci_doorbell.c 	u32 bucket;
bucket            187 drivers/misc/vmw_vmci/vmci_doorbell.c 	bucket = VMCI_DOORBELL_HASH(entry->idx);
bucket            188 drivers/misc/vmw_vmci/vmci_doorbell.c 	hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
bucket            355 drivers/misc/vmw_vmci/vmci_doorbell.c 	u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
bucket            360 drivers/misc/vmw_vmci/vmci_doorbell.c 	hlist_for_each_entry(dbell, &vmci_doorbell_it.entries[bucket], node) {
bucket            534 drivers/net/ethernet/freescale/fman/fman_dtsec.c static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
bucket            537 drivers/net/ethernet/freescale/fman/fman_dtsec.c 	int reg_idx = (bucket >> 5) & 0xf;
bucket            538 drivers/net/ethernet/freescale/fman/fman_dtsec.c 	int bit_idx = bucket & 0x1f;
bucket           1061 drivers/net/ethernet/freescale/fman/fman_dtsec.c 	s32 bucket;
bucket           1092 drivers/net/ethernet/freescale/fman/fman_dtsec.c 		bucket = (s32)((crc >> 23) & 0x1ff);
bucket           1094 drivers/net/ethernet/freescale/fman/fman_dtsec.c 		bucket = (s32)((crc >> 24) & 0xff);
bucket           1099 drivers/net/ethernet/freescale/fman/fman_dtsec.c 			bucket += 0x100;
bucket           1102 drivers/net/ethernet/freescale/fman/fman_dtsec.c 	set_bucket(dtsec->regs, bucket, true);
bucket           1114 drivers/net/ethernet/freescale/fman/fman_dtsec.c 			      &dtsec->multicast_addr_hash->lsts[bucket]);
bucket           1117 drivers/net/ethernet/freescale/fman/fman_dtsec.c 			      &dtsec->unicast_addr_hash->lsts[bucket]);
bucket           1172 drivers/net/ethernet/freescale/fman/fman_dtsec.c 	s32 bucket;
bucket           1193 drivers/net/ethernet/freescale/fman/fman_dtsec.c 		bucket = (s32)((crc >> 23) & 0x1ff);
bucket           1195 drivers/net/ethernet/freescale/fman/fman_dtsec.c 		bucket = (s32)((crc >> 24) & 0xff);
bucket           1200 drivers/net/ethernet/freescale/fman/fman_dtsec.c 			bucket += 0x100;
bucket           1206 drivers/net/ethernet/freescale/fman/fman_dtsec.c 			      &dtsec->multicast_addr_hash->lsts[bucket]) {
bucket           1214 drivers/net/ethernet/freescale/fman/fman_dtsec.c 		if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
bucket           1215 drivers/net/ethernet/freescale/fman/fman_dtsec.c 			set_bucket(dtsec->regs, bucket, false);
bucket           1219 drivers/net/ethernet/freescale/fman/fman_dtsec.c 			      &dtsec->unicast_addr_hash->lsts[bucket]) {
bucket           1227 drivers/net/ethernet/freescale/fman/fman_dtsec.c 		if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
bucket           1228 drivers/net/ethernet/freescale/fman/fman_dtsec.c 			set_bucket(dtsec->regs, bucket, false);
bucket            380 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c 				    enum hclge_shap_bucket bucket, u8 pg_id,
bucket            387 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c 	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
bucket            427 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c 				     enum hclge_shap_bucket bucket, u8 pri_id,
bucket            434 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c 	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
bucket            703 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		struct hlist_head *bucket;
bucket            709 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
bucket            710 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
bucket           1140 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct hlist_head *bucket;
bucket           1151 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		bucket = &priv->mac_hash[i];
bucket           1152 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
bucket           1195 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
bucket           1196 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		hlist_for_each_entry(entry, bucket, hlist) {
bucket           1236 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				bucket = &priv->mac_hash[mac_hash];
bucket           1237 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 				hlist_add_head_rcu(&entry->hlist, bucket);
bucket           1341 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 	struct hlist_head *bucket;
bucket           1346 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		bucket = &priv->mac_hash[i];
bucket           1347 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
bucket            741 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				struct hlist_head *bucket;
bucket            746 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				bucket = &priv->mac_hash[mac_hash];
bucket            747 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				hlist_for_each_entry_rcu(entry, bucket, hlist) {
bucket            171 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_bucket *bucket = chunk->bucket;
bucket            173 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	chunk->ste_arr = kvzalloc(bucket->num_of_entries *
bucket            178 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries *
bucket            183 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	chunk->miss_list = kvmalloc(bucket->num_of_entries *
bucket            197 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
bucket            200 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_pool *pool = bucket->pool;
bucket            207 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mr_req_size = bucket->num_of_entries * bucket->entry_size;
bucket            246 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		chunk->bucket = bucket;
bucket            252 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		chunk->num_of_entries = bucket->num_of_entries;
bucket            253 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		chunk->byte_size = chunk->num_of_entries * bucket->entry_size;
bucket            262 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		list_add(&chunk->chunk_list, &bucket->free_list);
bucket            263 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket->free_list_count++;
bucket            264 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket->total_chunks++;
bucket            285 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_bucket *bucket = chunk->bucket;
bucket            288 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->total_chunks--;
bucket            290 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (bucket->pool->icm_type == DR_ICM_TYPE_STE)
bucket            297 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			       struct mlx5dr_icm_bucket *bucket,
bucket            301 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket->entry_size = DR_STE_SIZE;
bucket            303 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket->entry_size = DR_MODIFY_ACTION_SIZE;
bucket            305 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
bucket            306 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->pool = pool;
bucket            307 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_init(&bucket->mutex);
bucket            308 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	INIT_LIST_HEAD(&bucket->free_list);
bucket            309 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	INIT_LIST_HEAD(&bucket->used_list);
bucket            310 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	INIT_LIST_HEAD(&bucket->hot_list);
bucket            311 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	INIT_LIST_HEAD(&bucket->sync_list);
bucket            314 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static void dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket)
bucket            318 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_destroy(&bucket->mutex);
bucket            319 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
bucket            320 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	list_splice_tail_init(&bucket->hot_list, &bucket->free_list);
bucket            322 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list)
bucket            325 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	WARN_ON(bucket->total_chunks != 0);
bucket            328 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list)
bucket            345 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 				     struct mlx5dr_icm_bucket *bucket)
bucket            350 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (bytes_for_sync < DR_ICM_SYNC_THRESHOLD || !bucket->hot_list_count)
bucket            356 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static void dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket)
bucket            358 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	list_splice_tail_init(&bucket->hot_list, &bucket->sync_list);
bucket            359 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->sync_list_count += bucket->hot_list_count;
bucket            360 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->hot_list_count = 0;
bucket            363 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static void dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket)
bucket            365 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
bucket            366 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->free_list_count += bucket->sync_list_count;
bucket            367 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->sync_list_count = 0;
bucket            370 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c static void dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket)
bucket            372 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	list_splice_tail_init(&bucket->sync_list, &bucket->hot_list);
bucket            373 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->hot_list_count += bucket->sync_list_count;
bucket            374 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->sync_list_count = 0;
bucket            381 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_bucket *bucket;
bucket            385 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket = &pool->buckets[i];
bucket            386 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		if (bucket == cb) {
bucket            387 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chill_bucket_start(bucket);
bucket            394 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		if (mutex_trylock(&bucket->mutex)) {
bucket            395 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chill_bucket_start(bucket);
bucket            405 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_bucket *bucket;
bucket            409 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket = &pool->buckets[i];
bucket            410 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		if (bucket == cb) {
bucket            411 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chill_bucket_end(bucket);
bucket            418 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		dr_icm_chill_bucket_end(bucket);
bucket            419 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		mutex_unlock(&bucket->mutex);
bucket            427 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_bucket *bucket;
bucket            431 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket = &pool->buckets[i];
bucket            432 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		if (bucket == cb) {
bucket            433 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chill_bucket_abort(bucket);
bucket            440 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		dr_icm_chill_bucket_abort(bucket);
bucket            441 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		mutex_unlock(&bucket->mutex);
bucket            454 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_bucket *bucket;
bucket            460 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket = &pool->buckets[chunk_size];
bucket            462 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_lock(&bucket->mutex);
bucket            465 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (list_empty(&bucket->free_list)) {
bucket            466 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		if (dr_icm_reuse_hot_entries(pool, bucket)) {
bucket            467 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chill_buckets_start(pool, bucket, buckets);
bucket            470 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 				dr_icm_chill_buckets_abort(pool, bucket, buckets);
bucket            475 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chill_buckets_end(pool, bucket, buckets);
bucket            477 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chunks_create(bucket);
bucket            481 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (!list_empty(&bucket->free_list)) {
bucket            482 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		chunk = list_last_entry(&bucket->free_list,
bucket            487 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			list_add_tail(&chunk->chunk_list, &bucket->used_list);
bucket            488 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			bucket->free_list_count--;
bucket            489 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			bucket->used_list_count++;
bucket            493 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_unlock(&bucket->mutex);
bucket            499 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_bucket *bucket = chunk->bucket;
bucket            501 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (bucket->pool->icm_type == DR_ICM_TYPE_STE) {
bucket            503 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		       bucket->num_of_entries * sizeof(chunk->ste_arr[0]));
bucket            505 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		       bucket->num_of_entries * DR_STE_SIZE_REDUCED);
bucket            508 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_lock(&bucket->mutex);
bucket            510 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	list_add_tail(&chunk->chunk_list, &bucket->hot_list);
bucket            511 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->hot_list_count++;
bucket            512 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket->used_list_count--;
bucket            513 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	mutex_unlock(&bucket->mutex);
bucket            789 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h 	struct mlx5dr_icm_bucket *bucket;
bucket           1252 drivers/net/gtp.c 	int i, j, bucket = cb->args[0], skip = cb->args[1];
bucket           1269 drivers/net/gtp.c 		for (i = bucket; i < gtp->hash_size; i++) {
bucket           1288 drivers/net/gtp.c 		bucket = 0;
bucket            698 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	u32 bucket;
bucket            742 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			bucket = __ffs(cfg->int_escan_map);
bucket            743 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 			cfg->int_escan_map &= ~BIT(bucket);
bucket            745 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 							       bucket);
bucket           1011 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h 	struct brcmf_gscan_bucket_config bucket[1];
bucket            433 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	memcpy(&gscan_cfg->bucket[0], buckets,
bucket            548 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
bucket            554 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	if (bucket < pi->n_reqs)
bucket            555 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 		reqid = pi->reqs[bucket]->reqid;
bucket             61 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.h u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket);
bucket            292 drivers/scsi/hpsa.c static void calc_bucket_map(int *bucket, int num_buckets,
bucket           9091 drivers/scsi/hpsa.c static void  calc_bucket_map(int bucket[], int num_buckets,
bucket           9103 drivers/scsi/hpsa.c 			if (bucket[j] >= size) {
bucket             85 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		int bucket = cxgbit_np_hashfn(cnp);
bucket             90 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		p->next = cdev->np_hash_tab[bucket];
bucket             91 drivers/target/iscsi/cxgbit/cxgbit_cm.c 		cdev->np_hash_tab[bucket] = p;
bucket            101 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
bucket            105 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
bucket            118 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
bucket            119 drivers/target/iscsi/cxgbit/cxgbit_cm.c 	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];
bucket            759 fs/btrfs/compression.c 	struct bucket_item *bucket;
bucket            794 fs/btrfs/compression.c 	kfree(workspace->bucket);
bucket            811 fs/btrfs/compression.c 	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
bucket            812 fs/btrfs/compression.c 	if (!ws->bucket)
bucket           1255 fs/btrfs/compression.c 	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
bucket           1256 fs/btrfs/compression.c 		p = ws->bucket[i].count;
bucket           1388 fs/btrfs/compression.c 	struct bucket_item *bucket = ws->bucket;
bucket           1391 fs/btrfs/compression.c 	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
bucket           1394 fs/btrfs/compression.c 		coreset_sum += bucket[i].count;
bucket           1399 fs/btrfs/compression.c 	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
bucket           1400 fs/btrfs/compression.c 		coreset_sum += bucket[i].count;
bucket           1427 fs/btrfs/compression.c 		if (ws->bucket[i].count > 0)
bucket           1437 fs/btrfs/compression.c 		if (ws->bucket[i].count > 0) {
bucket           1539 fs/btrfs/compression.c 	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
bucket           1543 fs/btrfs/compression.c 		ws->bucket[byte].count++;
bucket            347 fs/btrfs/raid56.c 	int bucket = rbio_bucket(rbio);
bucket            359 fs/btrfs/raid56.c 	h = table->table + bucket;
bucket            674 fs/btrfs/raid56.c 	int bucket = rbio_bucket(rbio);
bucket            675 fs/btrfs/raid56.c 	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
bucket            760 fs/btrfs/raid56.c 	int bucket;
bucket            765 fs/btrfs/raid56.c 	bucket = rbio_bucket(rbio);
bucket            766 fs/btrfs/raid56.c 	h = rbio->fs_info->stripe_hash_table->table + bucket;
bucket            161 fs/cifs/dfs_cache.c 	int bucket;
bucket            170 fs/cifs/dfs_cache.c 	hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
bucket            434 fs/cifs/dfs_cache.c 	int bucket;
bucket            439 fs/cifs/dfs_cache.c 	hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
bucket            368 fs/dlm/debug_fs.c 	unsigned bucket;
bucket            427 fs/dlm/debug_fs.c 	unsigned bucket, entry;
bucket            430 fs/dlm/debug_fs.c 	bucket = n >> 32;
bucket            433 fs/dlm/debug_fs.c 	if (bucket >= ls->ls_rsbtbl_size)
bucket            450 fs/dlm/debug_fs.c 	tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
bucket            452 fs/dlm/debug_fs.c 	spin_lock(&ls->ls_rsbtbl[bucket].lock);
bucket            459 fs/dlm/debug_fs.c 				ri->bucket = bucket;
bucket            460 fs/dlm/debug_fs.c 				spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket            465 fs/dlm/debug_fs.c 	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket            475 fs/dlm/debug_fs.c 		bucket++;
bucket            478 fs/dlm/debug_fs.c 		if (bucket >= ls->ls_rsbtbl_size) {
bucket            482 fs/dlm/debug_fs.c 		tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
bucket            484 fs/dlm/debug_fs.c 		spin_lock(&ls->ls_rsbtbl[bucket].lock);
bucket            490 fs/dlm/debug_fs.c 			ri->bucket = bucket;
bucket            491 fs/dlm/debug_fs.c 			spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket            495 fs/dlm/debug_fs.c 		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket            507 fs/dlm/debug_fs.c 	unsigned bucket;
bucket            510 fs/dlm/debug_fs.c 	bucket = n >> 32;
bucket            516 fs/dlm/debug_fs.c 	spin_lock(&ls->ls_rsbtbl[bucket].lock);
bucket            524 fs/dlm/debug_fs.c 		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket            529 fs/dlm/debug_fs.c 	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket            540 fs/dlm/debug_fs.c 		bucket++;
bucket            543 fs/dlm/debug_fs.c 		if (bucket >= ls->ls_rsbtbl_size) {
bucket            547 fs/dlm/debug_fs.c 		tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
bucket            549 fs/dlm/debug_fs.c 		spin_lock(&ls->ls_rsbtbl[bucket].lock);
bucket            555 fs/dlm/debug_fs.c 			ri->bucket = bucket;
bucket            556 fs/dlm/debug_fs.c 			spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket            560 fs/dlm/debug_fs.c 		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket            201 fs/dlm/dir.c   	uint32_t hash, bucket;
bucket            205 fs/dlm/dir.c   	bucket = hash & (ls->ls_rsbtbl_size - 1);
bucket            207 fs/dlm/dir.c   	spin_lock(&ls->ls_rsbtbl[bucket].lock);
bucket            208 fs/dlm/dir.c   	rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r);
bucket            210 fs/dlm/dir.c   		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss,
bucket            212 fs/dlm/dir.c   	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket            350 fs/dlm/lock.c  	uint32_t bucket = r->res_bucket;
bucket            352 fs/dlm/lock.c  	spin_lock(&ls->ls_rsbtbl[bucket].lock);
bucket            354 fs/dlm/lock.c  	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket           5471 fs/dlm/lock.c  static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
bucket           5476 fs/dlm/lock.c  	spin_lock(&ls->ls_rsbtbl[bucket].lock);
bucket           5477 fs/dlm/lock.c  	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
bucket           5487 fs/dlm/lock.c  		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket           5490 fs/dlm/lock.c  	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
bucket           5514 fs/dlm/lock.c  	int bucket = 0;
bucket           5520 fs/dlm/lock.c  		r = find_grant_rsb(ls, bucket);
bucket           5522 fs/dlm/lock.c  			if (bucket == ls->ls_rsbtbl_size - 1)
bucket           5524 fs/dlm/lock.c  			bucket++;
bucket            200 fs/fscache/cookie.c 	unsigned int bucket;
bucket            202 fs/fscache/cookie.c 	bucket = candidate->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
bucket            203 fs/fscache/cookie.c 	h = &fscache_cookie_hash[bucket];
bucket            843 fs/fscache/cookie.c 	unsigned int bucket;
bucket            845 fs/fscache/cookie.c 	bucket = cookie->key_hash & (ARRAY_SIZE(fscache_cookie_hash) - 1);
bucket            846 fs/fscache/cookie.c 	h = &fscache_cookie_hash[bucket];
bucket            141 fs/jffs2/gc.c  		int bucket, want_ino;
bucket            158 fs/jffs2/gc.c  		for (bucket = c->check_ino % c->inocache_hashsize ; bucket < c->inocache_hashsize; bucket++) {
bucket            159 fs/jffs2/gc.c  			for (ic = c->inocache_list[bucket]; ic; ic = ic->next) {
bucket             76 fs/nfs/pnfs_nfs.c 		struct pnfs_commit_bucket *bucket;
bucket             78 fs/nfs/pnfs_nfs.c 		bucket = list_first_entry(&req->wb_list,
bucket             81 fs/nfs/pnfs_nfs.c 		freeme = bucket->wlseg;
bucket             82 fs/nfs/pnfs_nfs.c 		bucket->wlseg = NULL;
bucket             91 fs/nfs/pnfs_nfs.c pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
bucket             95 fs/nfs/pnfs_nfs.c 	struct list_head *src = &bucket->written;
bucket             96 fs/nfs/pnfs_nfs.c 	struct list_head *dst = &bucket->committing;
bucket            104 fs/nfs/pnfs_nfs.c 		if (bucket->clseg == NULL)
bucket            105 fs/nfs/pnfs_nfs.c 			bucket->clseg = pnfs_get_lseg(bucket->wlseg);
bucket            107 fs/nfs/pnfs_nfs.c 			pnfs_put_lseg(bucket->wlseg);
bucket            108 fs/nfs/pnfs_nfs.c 			bucket->wlseg = NULL;
bucket            162 fs/nfs/pnfs_nfs.c 	struct pnfs_commit_bucket *bucket;
bucket            170 fs/nfs/pnfs_nfs.c 		bucket = &fl_cinfo->buckets[i];
bucket            171 fs/nfs/pnfs_nfs.c 		if (list_empty(&bucket->committing))
bucket            173 fs/nfs/pnfs_nfs.c 		freeme = bucket->clseg;
bucket            174 fs/nfs/pnfs_nfs.c 		bucket->clseg = NULL;
bucket            175 fs/nfs/pnfs_nfs.c 		list_for_each(pos, &bucket->committing)
bucket            177 fs/nfs/pnfs_nfs.c 		list_splice_init(&bucket->committing, &pages);
bucket            191 fs/nfs/pnfs_nfs.c 	struct pnfs_commit_bucket *bucket;
bucket            197 fs/nfs/pnfs_nfs.c 	bucket = fl_cinfo->buckets;
bucket            198 fs/nfs/pnfs_nfs.c 	for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
bucket            199 fs/nfs/pnfs_nfs.c 		if (list_empty(&bucket->committing))
bucket            219 fs/nfs/pnfs_nfs.c 	struct pnfs_commit_bucket *bucket;
bucket            222 fs/nfs/pnfs_nfs.c 	bucket = &cinfo->ds->buckets[data->ds_commit_index];
bucket            224 fs/nfs/pnfs_nfs.c 	list_for_each(pos, &bucket->committing)
bucket            226 fs/nfs/pnfs_nfs.c 	list_splice_init(&bucket->committing, pages);
bucket            227 fs/nfs/pnfs_nfs.c 	data->lseg = bucket->clseg;
bucket            228 fs/nfs/pnfs_nfs.c 	bucket->clseg = NULL;
bucket            409 fs/ocfs2/dlm/dlmdebug.c 	struct hlist_head *bucket;
bucket            418 fs/ocfs2/dlm/dlmdebug.c 		bucket = dlm_master_hash(dlm, i);
bucket            419 fs/ocfs2/dlm/dlmdebug.c 		hlist_for_each_entry(mle, bucket, master_hash_node) {
bucket            160 fs/ocfs2/dlm/dlmdomain.c 	struct hlist_head *bucket;
bucket            164 fs/ocfs2/dlm/dlmdomain.c 	bucket = dlm_lockres_hash(dlm, res->lockname.hash);
bucket            169 fs/ocfs2/dlm/dlmdomain.c 	hlist_add_head(&res->hash_node, bucket);
bucket            180 fs/ocfs2/dlm/dlmdomain.c 	struct hlist_head *bucket;
bucket            187 fs/ocfs2/dlm/dlmdomain.c 	bucket = dlm_lockres_hash(dlm, hash);
bucket            189 fs/ocfs2/dlm/dlmdomain.c 	hlist_for_each_entry(res, bucket, hash_node) {
bucket            410 fs/ocfs2/dlm/dlmdomain.c 	struct hlist_head *bucket;
bucket            420 fs/ocfs2/dlm/dlmdomain.c 		bucket = dlm_lockres_hash(dlm, i);
bucket            421 fs/ocfs2/dlm/dlmdomain.c 		iter = bucket->first;
bucket            315 fs/ocfs2/dlm/dlmmaster.c 	struct hlist_head *bucket;
bucket            319 fs/ocfs2/dlm/dlmmaster.c 	bucket = dlm_master_hash(dlm, mle->mnamehash);
bucket            320 fs/ocfs2/dlm/dlmmaster.c 	hlist_add_head(&mle->master_hash_node, bucket);
bucket            329 fs/ocfs2/dlm/dlmmaster.c 	struct hlist_head *bucket;
bucket            335 fs/ocfs2/dlm/dlmmaster.c 	bucket = dlm_master_hash(dlm, hash);
bucket            336 fs/ocfs2/dlm/dlmmaster.c 	hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
bucket           3336 fs/ocfs2/dlm/dlmmaster.c 	struct hlist_head *bucket;
bucket           3347 fs/ocfs2/dlm/dlmmaster.c 		bucket = dlm_master_hash(dlm, i);
bucket           3348 fs/ocfs2/dlm/dlmmaster.c 		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
bucket           3536 fs/ocfs2/dlm/dlmmaster.c 	struct hlist_head *bucket;
bucket           3553 fs/ocfs2/dlm/dlmmaster.c 		bucket = dlm_master_hash(dlm, i);
bucket           3554 fs/ocfs2/dlm/dlmmaster.c 		hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
bucket           2141 fs/ocfs2/dlm/dlmrecovery.c 	struct hlist_head *bucket;
bucket           2170 fs/ocfs2/dlm/dlmrecovery.c 		bucket = dlm_lockres_hash(dlm, i);
bucket           2171 fs/ocfs2/dlm/dlmrecovery.c 		hlist_for_each_entry(res, bucket, hash_node) {
bucket           2333 fs/ocfs2/dlm/dlmrecovery.c 	struct hlist_head *bucket;
bucket           2356 fs/ocfs2/dlm/dlmrecovery.c 		bucket = dlm_lockres_hash(dlm, i);
bucket           2357 fs/ocfs2/dlm/dlmrecovery.c 		hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
bucket           1788 fs/ocfs2/ocfs2_trace.h 	 unsigned int hash, unsigned long long bucket,			\
bucket           1790 fs/ocfs2/ocfs2_trace.h 	TP_ARGS(ino, name, name_index, hash, bucket, xe_index))
bucket            127 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket;
bucket            281 fs/ocfs2/xattr.c 					struct ocfs2_xattr_bucket *bucket,
bucket            303 fs/ocfs2/xattr.c 					   struct ocfs2_xattr_bucket *bucket,
bucket            324 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket;
bucket            329 fs/ocfs2/xattr.c 	bucket = kzalloc(sizeof(struct ocfs2_xattr_bucket), GFP_NOFS);
bucket            330 fs/ocfs2/xattr.c 	if (bucket) {
bucket            331 fs/ocfs2/xattr.c 		bucket->bu_inode = inode;
bucket            332 fs/ocfs2/xattr.c 		bucket->bu_blocks = blks;
bucket            335 fs/ocfs2/xattr.c 	return bucket;
bucket            338 fs/ocfs2/xattr.c static void ocfs2_xattr_bucket_relse(struct ocfs2_xattr_bucket *bucket)
bucket            342 fs/ocfs2/xattr.c 	for (i = 0; i < bucket->bu_blocks; i++) {
bucket            343 fs/ocfs2/xattr.c 		brelse(bucket->bu_bhs[i]);
bucket            344 fs/ocfs2/xattr.c 		bucket->bu_bhs[i] = NULL;
bucket            348 fs/ocfs2/xattr.c static void ocfs2_xattr_bucket_free(struct ocfs2_xattr_bucket *bucket)
bucket            350 fs/ocfs2/xattr.c 	if (bucket) {
bucket            351 fs/ocfs2/xattr.c 		ocfs2_xattr_bucket_relse(bucket);
bucket            352 fs/ocfs2/xattr.c 		bucket->bu_inode = NULL;
bucket            353 fs/ocfs2/xattr.c 		kfree(bucket);
bucket            363 fs/ocfs2/xattr.c static int ocfs2_init_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
bucket            368 fs/ocfs2/xattr.c 	for (i = 0; i < bucket->bu_blocks; i++) {
bucket            369 fs/ocfs2/xattr.c 		bucket->bu_bhs[i] = sb_getblk(bucket->bu_inode->i_sb,
bucket            371 fs/ocfs2/xattr.c 		if (!bucket->bu_bhs[i]) {
bucket            377 fs/ocfs2/xattr.c 		if (!ocfs2_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
bucket            378 fs/ocfs2/xattr.c 					   bucket->bu_bhs[i])) {
bucket            380 fs/ocfs2/xattr.c 				ocfs2_set_new_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
bucket            381 fs/ocfs2/xattr.c 							      bucket->bu_bhs[i]);
bucket            383 fs/ocfs2/xattr.c 				set_buffer_uptodate(bucket->bu_bhs[i]);
bucket            384 fs/ocfs2/xattr.c 				ocfs2_set_buffer_uptodate(INODE_CACHE(bucket->bu_inode),
bucket            385 fs/ocfs2/xattr.c 							  bucket->bu_bhs[i]);
bucket            391 fs/ocfs2/xattr.c 		ocfs2_xattr_bucket_relse(bucket);
bucket            396 fs/ocfs2/xattr.c static int ocfs2_read_xattr_bucket(struct ocfs2_xattr_bucket *bucket,
bucket            401 fs/ocfs2/xattr.c 	rc = ocfs2_read_blocks(INODE_CACHE(bucket->bu_inode), xb_blkno,
bucket            402 fs/ocfs2/xattr.c 			       bucket->bu_blocks, bucket->bu_bhs, 0,
bucket            405 fs/ocfs2/xattr.c 		spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
bucket            406 fs/ocfs2/xattr.c 		rc = ocfs2_validate_meta_ecc_bhs(bucket->bu_inode->i_sb,
bucket            407 fs/ocfs2/xattr.c 						 bucket->bu_bhs,
bucket            408 fs/ocfs2/xattr.c 						 bucket->bu_blocks,
bucket            409 fs/ocfs2/xattr.c 						 &bucket_xh(bucket)->xh_check);
bucket            410 fs/ocfs2/xattr.c 		spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
bucket            416 fs/ocfs2/xattr.c 		ocfs2_xattr_bucket_relse(bucket);
bucket            421 fs/ocfs2/xattr.c 					     struct ocfs2_xattr_bucket *bucket,
bucket            426 fs/ocfs2/xattr.c 	for (i = 0; i < bucket->bu_blocks; i++) {
bucket            428 fs/ocfs2/xattr.c 					  INODE_CACHE(bucket->bu_inode),
bucket            429 fs/ocfs2/xattr.c 					  bucket->bu_bhs[i], type);
bucket            440 fs/ocfs2/xattr.c 					     struct ocfs2_xattr_bucket *bucket)
bucket            444 fs/ocfs2/xattr.c 	spin_lock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
bucket            445 fs/ocfs2/xattr.c 	ocfs2_compute_meta_ecc_bhs(bucket->bu_inode->i_sb,
bucket            446 fs/ocfs2/xattr.c 				   bucket->bu_bhs, bucket->bu_blocks,
bucket            447 fs/ocfs2/xattr.c 				   &bucket_xh(bucket)->xh_check);
bucket            448 fs/ocfs2/xattr.c 	spin_unlock(&OCFS2_SB(bucket->bu_inode->i_sb)->osb_xattr_lock);
bucket            450 fs/ocfs2/xattr.c 	for (i = 0; i < bucket->bu_blocks; i++)
bucket            451 fs/ocfs2/xattr.c 		ocfs2_journal_dirty(handle, bucket->bu_bhs[i]);
bucket           1216 fs/ocfs2/xattr.c 	xs->bucket = ocfs2_xattr_bucket_new(inode);
bucket           1217 fs/ocfs2/xattr.c 	if (!xs->bucket) {
bucket           1247 fs/ocfs2/xattr.c 								bucket_xh(xs->bucket),
bucket           1255 fs/ocfs2/xattr.c 			xs->base = bucket_block(xs->bucket, block_off);
bucket           1273 fs/ocfs2/xattr.c 	ocfs2_xattr_bucket_free(xs->bucket);
bucket           1707 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
bucket           1709 fs/ocfs2/xattr.c 	return ocfs2_xattr_bucket_journal_access(handle, bucket, type);
bucket           1715 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
bucket           1717 fs/ocfs2/xattr.c 	ocfs2_xattr_bucket_journal_dirty(handle, bucket);
bucket           1723 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
bucket           1730 fs/ocfs2/xattr.c 	return bucket_block(bucket, block) + block_offset;
bucket           1742 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
bucket           1743 fs/ocfs2/xattr.c 	return le16_to_cpu(bucket_xh(bucket)->xh_free_start);
bucket           1864 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket = loc->xl_storage;
bucket           1874 fs/ocfs2/xattr.c 	BUG_ON(!bucket->bu_bhs[block_offset]);
bucket           1877 fs/ocfs2/xattr.c 	vb->vb_bh = bucket->bu_bhs[block_offset];
bucket           2306 fs/ocfs2/xattr.c 					   struct ocfs2_xattr_bucket *bucket,
bucket           2309 fs/ocfs2/xattr.c 	loc->xl_inode = bucket->bu_inode;
bucket           2311 fs/ocfs2/xattr.c 	loc->xl_storage = bucket;
bucket           2312 fs/ocfs2/xattr.c 	loc->xl_header = bucket_xh(bucket);
bucket           3094 fs/ocfs2/xattr.c 							bucket_xh(xbs->bucket),
bucket           3097 fs/ocfs2/xattr.c 			base = bucket_block(xbs->bucket, block_off);
bucket           3489 fs/ocfs2/xattr.c 		xbs.bucket = ocfs2_xattr_bucket_new(inode);
bucket           3490 fs/ocfs2/xattr.c 		if (!xbs.bucket) {
bucket           3515 fs/ocfs2/xattr.c 	ocfs2_xattr_bucket_free(xbs.bucket);
bucket           3566 fs/ocfs2/xattr.c 	xbs.bucket = ocfs2_xattr_bucket_new(inode);
bucket           3567 fs/ocfs2/xattr.c 	if (!xbs.bucket) {
bucket           3675 fs/ocfs2/xattr.c 	ocfs2_xattr_bucket_free(xbs.bucket);
bucket           3745 fs/ocfs2/xattr.c 				struct ocfs2_xattr_bucket *bucket,
bucket           3749 fs/ocfs2/xattr.c 				   struct ocfs2_xattr_bucket *bucket,
bucket           3757 fs/ocfs2/xattr.c 	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
bucket           3791 fs/ocfs2/xattr.c 		xe_name = bucket_block(bucket, block_off) + new_offset;
bucket           3826 fs/ocfs2/xattr.c 	int low_bucket = 0, bucket, high_bucket;
bucket           3848 fs/ocfs2/xattr.c 		bucket = (low_bucket + high_bucket) / 2;
bucket           3849 fs/ocfs2/xattr.c 		blkno = p_blkno + bucket * blk_per_bucket;
bucket           3859 fs/ocfs2/xattr.c 			high_bucket = bucket - 1;
bucket           3875 fs/ocfs2/xattr.c 			low_bucket = bucket + 1;
bucket           3899 fs/ocfs2/xattr.c 	ret = ocfs2_read_xattr_bucket(xs->bucket, lower_blkno);
bucket           3905 fs/ocfs2/xattr.c 	xs->header = bucket_xh(xs->bucket);
bucket           3906 fs/ocfs2/xattr.c 	xs->base = bucket_block(xs->bucket, 0);
bucket           3913 fs/ocfs2/xattr.c 			(unsigned long long)bucket_blkno(xs->bucket),
bucket           3976 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket;
bucket           3978 fs/ocfs2/xattr.c 	bucket = ocfs2_xattr_bucket_new(inode);
bucket           3979 fs/ocfs2/xattr.c 	if (!bucket) {
bucket           3988 fs/ocfs2/xattr.c 	for (i = 0; i < num_buckets; i++, blkno += bucket->bu_blocks) {
bucket           3989 fs/ocfs2/xattr.c 		ret = ocfs2_read_xattr_bucket(bucket, blkno);
bucket           4000 fs/ocfs2/xattr.c 			num_buckets = le16_to_cpu(bucket_xh(bucket)->xh_num_buckets);
bucket           4003 fs/ocfs2/xattr.c 		     le32_to_cpu(bucket_xh(bucket)->xh_entries[0].xe_name_hash));
bucket           4005 fs/ocfs2/xattr.c 			ret = func(inode, bucket, para);
bucket           4011 fs/ocfs2/xattr.c 		ocfs2_xattr_bucket_relse(bucket);
bucket           4016 fs/ocfs2/xattr.c 	ocfs2_xattr_bucket_free(bucket);
bucket           4046 fs/ocfs2/xattr.c 				   struct ocfs2_xattr_bucket *bucket,
bucket           4054 fs/ocfs2/xattr.c 	for (i = 0 ; i < le16_to_cpu(bucket_xh(bucket)->xh_count); i++) {
bucket           4055 fs/ocfs2/xattr.c 		struct ocfs2_xattr_entry *entry = &bucket_xh(bucket)->xh_entries[i];
bucket           4059 fs/ocfs2/xattr.c 							bucket_xh(bucket),
bucket           4066 fs/ocfs2/xattr.c 		name = (const char *)bucket_block(bucket, block_off) +
bucket           4186 fs/ocfs2/xattr.c 					   struct ocfs2_xattr_bucket *bucket)
bucket           4195 fs/ocfs2/xattr.c 	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
bucket           4198 fs/ocfs2/xattr.c 	char *target = bucket_block(bucket, blks - 1);
bucket           4202 fs/ocfs2/xattr.c 				(unsigned long long)bucket_blkno(bucket));
bucket           4205 fs/ocfs2/xattr.c 		memset(bucket_block(bucket, i), 0, blocksize);
bucket           4227 fs/ocfs2/xattr.c 	target = bucket_block(bucket, 0);
bucket           4261 fs/ocfs2/xattr.c 	xs->header = bucket_xh(xs->bucket);
bucket           4262 fs/ocfs2/xattr.c 	xs->base = bucket_block(xs->bucket, 0);
bucket           4291 fs/ocfs2/xattr.c 	BUG_ON(!xs->bucket);
bucket           4323 fs/ocfs2/xattr.c 	ret = ocfs2_init_xattr_bucket(xs->bucket, blkno, 1);
bucket           4329 fs/ocfs2/xattr.c 	ret = ocfs2_xattr_bucket_journal_access(handle, xs->bucket,
bucket           4336 fs/ocfs2/xattr.c 	ocfs2_cp_xattr_block_to_bucket(inode, xb_bh, xs->bucket);
bucket           4337 fs/ocfs2/xattr.c 	ocfs2_xattr_bucket_journal_dirty(handle, xs->bucket);
bucket           4387 fs/ocfs2/xattr.c 				     struct ocfs2_xattr_bucket *bucket)
bucket           4393 fs/ocfs2/xattr.c 	u64 blkno = bucket_blkno(bucket);
bucket           4411 fs/ocfs2/xattr.c 	for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
bucket           4412 fs/ocfs2/xattr.c 		memcpy(buf, bucket_block(bucket, i), blocksize);
bucket           4414 fs/ocfs2/xattr.c 	ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
bucket           4481 fs/ocfs2/xattr.c 	for (i = 0; i < bucket->bu_blocks; i++, buf += blocksize)
bucket           4482 fs/ocfs2/xattr.c 		memcpy(bucket_block(bucket, i), buf, blocksize);
bucket           4483 fs/ocfs2/xattr.c 	ocfs2_xattr_bucket_journal_dirty(handle, bucket);
bucket           5355 fs/ocfs2/xattr.c 					     struct ocfs2_xattr_bucket *bucket,
bucket           5363 fs/ocfs2/xattr.c 	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
bucket           5381 fs/ocfs2/xattr.c 	vb.vb_bh = bucket->bu_bhs[value_blk];
bucket           5395 fs/ocfs2/xattr.c 			(unsigned long long)bucket_blkno(bucket), xe_off, len);
bucket           5402 fs/ocfs2/xattr.c 	ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
bucket           5411 fs/ocfs2/xattr.c 	ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket);
bucket           5519 fs/ocfs2/xattr.c 					      struct ocfs2_xattr_bucket *bucket,
bucket           5522 fs/ocfs2/xattr.c 	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
bucket           5532 fs/ocfs2/xattr.c 		     (unsigned long long)bucket_blkno(bucket),
bucket           5554 fs/ocfs2/xattr.c 	ocfs2_init_xattr_bucket_xa_loc(&loc, xs->bucket,
bucket           5568 fs/ocfs2/xattr.c 					xs->bucket);
bucket           5613 fs/ocfs2/xattr.c 						 xs->bucket,
bucket           5622 fs/ocfs2/xattr.c 					 xs->bucket,
bucket           5636 fs/ocfs2/xattr.c 	ocfs2_xattr_bucket_relse(xs->bucket);
bucket           5654 fs/ocfs2/xattr.c 					struct ocfs2_xattr_bucket *bucket,
bucket           5658 fs/ocfs2/xattr.c 	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
bucket           5676 fs/ocfs2/xattr.c 		ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket,
bucket           5696 fs/ocfs2/xattr.c 		ret = ocfs2_xattr_bucket_value_truncate(inode, bucket,
bucket           5730 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket =
bucket           5733 fs/ocfs2/xattr.c 	ret = ocfs2_xattr_bucket_journal_access(handle, bucket,
bucket           5740 fs/ocfs2/xattr.c 	ocfs2_xattr_bucket_journal_dirty(handle, bucket);
bucket           5776 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket = NULL;
bucket           5799 fs/ocfs2/xattr.c 							bucket_xh(xbs->bucket),
bucket           5806 fs/ocfs2/xattr.c 			base = bucket_block(xbs->bucket, block_off);
bucket           5807 fs/ocfs2/xattr.c 			vb.vb_bh = xbs->bucket->bu_bhs[block_off];
bucket           5812 fs/ocfs2/xattr.c 				bucket = xbs->bucket;
bucket           5813 fs/ocfs2/xattr.c 				refcount.credits = bucket->bu_blocks;
bucket           5814 fs/ocfs2/xattr.c 				refcount.para = bucket;
bucket           6001 fs/ocfs2/xattr.c 					   struct ocfs2_xattr_bucket *bucket,
bucket           6007 fs/ocfs2/xattr.c 	struct ocfs2_xattr_header *xh = bucket_xh(bucket);
bucket           6012 fs/ocfs2/xattr.c 						bucket_xh(bucket),
bucket           6021 fs/ocfs2/xattr.c 	base = bucket_block(bucket, block_off);
bucket           6027 fs/ocfs2/xattr.c 		*bh = bucket->bu_bhs[block_off];
bucket           6037 fs/ocfs2/xattr.c 					     struct ocfs2_xattr_bucket *bucket,
bucket           6045 fs/ocfs2/xattr.c 			(struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
bucket           6051 fs/ocfs2/xattr.c 		.credits = bucket->bu_blocks,
bucket           6052 fs/ocfs2/xattr.c 		.para = bucket,
bucket           6062 fs/ocfs2/xattr.c 				(unsigned long long)bucket_blkno(bucket),
bucket           6070 fs/ocfs2/xattr.c 		ret = ocfs2_get_xattr_tree_value_root(inode->i_sb, bucket, i,
bucket           6692 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket;
bucket           6695 fs/ocfs2/xattr.c 		bucket = args->old_bucket;
bucket           6697 fs/ocfs2/xattr.c 		bucket = args->new_bucket;
bucket           6699 fs/ocfs2/xattr.c 	return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
bucket           6717 fs/ocfs2/xattr.c 	struct ocfs2_xattr_bucket *bucket =
bucket           6720 fs/ocfs2/xattr.c 	return ocfs2_get_xattr_tree_value_root(sb, bucket, offset,
bucket           6725 fs/ocfs2/xattr.c 				      struct ocfs2_xattr_bucket *bucket,
bucket           6731 fs/ocfs2/xattr.c 			(struct ocfs2_xattr_header *)bucket->bu_bhs[0]->b_data;
bucket           6734 fs/ocfs2/xattr.c 	metas->credits += bucket->bu_blocks;
bucket           6735 fs/ocfs2/xattr.c 	return ocfs2_value_metas_in_xattr_header(inode->i_sb, bucket->bu_bhs[0],
bucket           6739 fs/ocfs2/xattr.c 					bucket);
bucket             28 fs/omfs/dir.c  	int bucket = omfs_hash(name, namelen, nbuckets);
bucket             30 fs/omfs/dir.c  	*ofs = OMFS_DIR_START + bucket * 8;
bucket           1099 fs/seq_file.c  		struct hlist_head *bucket = per_cpu_ptr(head, *cpu);
bucket           1101 fs/seq_file.c  		if (!hlist_empty(bucket))
bucket           1102 fs/seq_file.c  			return bucket->first;
bucket            293 fs/xfs/libxfs/xfs_ag.c 	int			bucket;
bucket            302 fs/xfs/libxfs/xfs_ag.c 	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
bucket            303 fs/xfs/libxfs/xfs_ag.c 		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
bucket            313 fs/xfs/libxfs/xfs_ag.c 	int			bucket;
bucket            331 fs/xfs/libxfs/xfs_ag.c 	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
bucket            332 fs/xfs/libxfs/xfs_ag.c 		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
bucket           1940 fs/xfs/xfs_log_recover.c 	struct list_head	*bucket;
bucket           1955 fs/xfs/xfs_log_recover.c 	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
bucket           1956 fs/xfs/xfs_log_recover.c 	list_for_each_entry(bcp, bucket, bc_list) {
bucket           1969 fs/xfs/xfs_log_recover.c 	list_add_tail(&bcp->bc_list, bucket);
bucket           1987 fs/xfs/xfs_log_recover.c 	struct list_head	*bucket;
bucket           1996 fs/xfs/xfs_log_recover.c 	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
bucket           1997 fs/xfs/xfs_log_recover.c 	list_for_each_entry(bcp, bucket, bc_list) {
bucket           4934 fs/xfs/xfs_log_recover.c 	int		bucket)
bucket           4951 fs/xfs/xfs_log_recover.c 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
bucket           4953 fs/xfs/xfs_log_recover.c 		 (sizeof(xfs_agino_t) * bucket);
bucket           4974 fs/xfs/xfs_log_recover.c 	int				bucket)
bucket           5022 fs/xfs/xfs_log_recover.c 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
bucket           5058 fs/xfs/xfs_log_recover.c 	int		bucket;
bucket           5089 fs/xfs/xfs_log_recover.c 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
bucket           5090 fs/xfs/xfs_log_recover.c 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
bucket           5093 fs/xfs/xfs_log_recover.c 							agno, agino, bucket);
bucket           3372 fs/xfs/xfs_trace.h 	TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int bucket,
bucket           3374 fs/xfs/xfs_trace.h 	TP_ARGS(mp, agno, bucket, old_ptr, new_ptr),
bucket           3378 fs/xfs/xfs_trace.h 		__field(unsigned int, bucket)
bucket           3385 fs/xfs/xfs_trace.h 		__entry->bucket = bucket;
bucket           3392 fs/xfs/xfs_trace.h 		  __entry->bucket,
bucket            149 include/linux/atalk.h 	int bucket;
bucket            336 include/linux/rhashtable.h 				   struct rhash_lock_head **bucket,
bucket            340 include/linux/rhashtable.h 	bit_spin_lock(0, (unsigned long *)bucket);
bucket            383 include/net/neighbour.h 	unsigned int bucket;
bucket             47 include/net/ping.h 	int			bucket;
bucket             46 include/net/raw.h 	int bucket;
bucket           1915 include/net/tcp.h 	int			bucket, offset, sbucket, num;
bucket             48 include/net/transp_v6.h 			       __u16 srcp, __u16 destp, int rqueue, int bucket);
bucket             51 include/net/transp_v6.h 			__u16 destp, int bucket)
bucket             54 include/net/transp_v6.h 				  bucket);
bucket            442 include/net/udp.h 	int			bucket;
bucket             68 include/trace/events/bcache.h 		__field(size_t,		bucket			)
bucket             72 include/trace/events/bcache.h 		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
bucket             75 include/trace/events/bcache.h 	TP_printk("bucket %zu", __entry->bucket)
bucket            267 include/trace/events/bcache.h 		__field(size_t,		bucket			)
bucket            273 include/trace/events/bcache.h 		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
bucket            278 include/trace/events/bcache.h 	TP_printk("bucket %zu", __entry->bucket)
bucket            369 include/trace/events/bcache.h 		__field(size_t,		bucket			)
bucket            374 include/trace/events/bcache.h 		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
bucket            378 include/trace/events/bcache.h 	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
bucket            428 include/trace/events/bcache.h 	TP_PROTO(struct cache *ca, size_t bucket),
bucket            429 include/trace/events/bcache.h 	TP_ARGS(ca, bucket),
bucket            439 include/trace/events/bcache.h 		__entry->offset		= bucket << ca->set->bucket_bits;
bucket            440 include/trace/events/bcache.h 		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
bucket            449 include/trace/events/bcache.h 	TP_PROTO(struct cache *ca, size_t bucket),
bucket            450 include/trace/events/bcache.h 	TP_ARGS(ca, bucket),
bucket            459 include/trace/events/bcache.h 		__entry->offset		= bucket << ca->set->bucket_bits;
bucket             27 kernel/bpf/hashtab.c 	struct bucket *buckets;
bucket            343 kernel/bpf/hashtab.c 	    htab->n_buckets > U32_MAX / sizeof(struct bucket))
bucket            346 kernel/bpf/hashtab.c 	cost = (u64) htab->n_buckets * sizeof(struct bucket) +
bucket            362 kernel/bpf/hashtab.c 					   sizeof(struct bucket),
bucket            410 kernel/bpf/hashtab.c static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
bucket            577 kernel/bpf/hashtab.c 	struct bucket *b;
bucket            826 kernel/bpf/hashtab.c 	struct bucket *b;
bucket            918 kernel/bpf/hashtab.c 	struct bucket *b;
bucket            983 kernel/bpf/hashtab.c 	struct bucket *b;
bucket           1036 kernel/bpf/hashtab.c 	struct bucket *b;
bucket           1111 kernel/bpf/hashtab.c 	struct bucket *b;
bucket           1143 kernel/bpf/hashtab.c 	struct bucket *b;
bucket            351 kernel/bpf/stackmap.c 	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
bucket            387 kernel/bpf/stackmap.c 	bucket = READ_ONCE(smap->buckets[id]);
bucket            389 kernel/bpf/stackmap.c 	hash_matches = bucket && bucket->hash == hash;
bucket            405 kernel/bpf/stackmap.c 		if (hash_matches && bucket->nr == trace_nr &&
bucket            406 kernel/bpf/stackmap.c 		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
bucket            410 kernel/bpf/stackmap.c 		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
bucket            415 kernel/bpf/stackmap.c 		if (hash_matches && bucket->nr == trace_nr &&
bucket            416 kernel/bpf/stackmap.c 		    memcmp(bucket->data, ips, trace_len) == 0)
bucket            418 kernel/bpf/stackmap.c 		if (bucket && !(flags & BPF_F_REUSE_STACKID))
bucket            523 kernel/bpf/stackmap.c 	struct stack_map_bucket *bucket, *old_bucket;
bucket            529 kernel/bpf/stackmap.c 	bucket = xchg(&smap->buckets[id], NULL);
bucket            530 kernel/bpf/stackmap.c 	if (!bucket)
bucket            533 kernel/bpf/stackmap.c 	trace_len = bucket->nr * stack_map_data_size(map);
bucket            534 kernel/bpf/stackmap.c 	memcpy(value, bucket->data, trace_len);
bucket            537 kernel/bpf/stackmap.c 	old_bucket = xchg(&smap->buckets[id], bucket);
bucket            260 kernel/dma/debug.c static void put_hash_bucket(struct hash_bucket *bucket,
bucket            262 kernel/dma/debug.c 	__releases(&bucket->lock)
bucket            266 kernel/dma/debug.c 	spin_unlock_irqrestore(&bucket->lock, __flags);
bucket            291 kernel/dma/debug.c static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
bucket            298 kernel/dma/debug.c 	list_for_each_entry(entry, &bucket->list, list) {
bucket            341 kernel/dma/debug.c static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
bucket            344 kernel/dma/debug.c 	return __hash_bucket_find(bucket, ref, exact_match);
bucket            347 kernel/dma/debug.c static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
bucket            357 kernel/dma/debug.c 		entry = __hash_bucket_find(*bucket, ref, containing_match);
bucket            365 kernel/dma/debug.c 		put_hash_bucket(*bucket, flags);
bucket            368 kernel/dma/debug.c 		*bucket = get_hash_bucket(&index, flags);
bucket            377 kernel/dma/debug.c static void hash_bucket_add(struct hash_bucket *bucket,
bucket            380 kernel/dma/debug.c 	list_add_tail(&entry->list, &bucket->list);
bucket            407 kernel/dma/debug.c 		struct hash_bucket *bucket = &dma_entry_hash[idx];
bucket            411 kernel/dma/debug.c 		spin_lock_irqsave(&bucket->lock, flags);
bucket            413 kernel/dma/debug.c 		list_for_each_entry(entry, &bucket->list, list) {
bucket            425 kernel/dma/debug.c 		spin_unlock_irqrestore(&bucket->lock, flags);
bucket            609 kernel/dma/debug.c 	struct hash_bucket *bucket;
bucket            613 kernel/dma/debug.c 	bucket = get_hash_bucket(entry, &flags);
bucket            614 kernel/dma/debug.c 	hash_bucket_add(bucket, entry);
bucket            615 kernel/dma/debug.c 	put_hash_bucket(bucket, &flags);
bucket            827 kernel/dma/debug.c 		struct hash_bucket *bucket = &dma_entry_hash[idx];
bucket            831 kernel/dma/debug.c 		spin_lock_irqsave(&bucket->lock, flags);
bucket            832 kernel/dma/debug.c 		list_for_each_entry(entry, &bucket->list, list) {
bucket            843 kernel/dma/debug.c 		spin_unlock_irqrestore(&bucket->lock, flags);
bucket           1000 kernel/dma/debug.c 	struct hash_bucket *bucket;
bucket           1003 kernel/dma/debug.c 	bucket = get_hash_bucket(ref, &flags);
bucket           1004 kernel/dma/debug.c 	entry = bucket_find_exact(bucket, ref);
bucket           1008 kernel/dma/debug.c 		put_hash_bucket(bucket, &flags);
bucket           1090 kernel/dma/debug.c 	put_hash_bucket(bucket, &flags);
bucket           1143 kernel/dma/debug.c 	struct hash_bucket *bucket;
bucket           1146 kernel/dma/debug.c 	bucket = get_hash_bucket(ref, &flags);
bucket           1148 kernel/dma/debug.c 	entry = bucket_find_contain(&bucket, ref, &flags);
bucket           1210 kernel/dma/debug.c 	put_hash_bucket(bucket, &flags);
bucket           1295 kernel/dma/debug.c 	struct hash_bucket *bucket;
bucket           1303 kernel/dma/debug.c 	bucket = get_hash_bucket(&ref, &flags);
bucket           1305 kernel/dma/debug.c 	list_for_each_entry(entry, &bucket->list, list) {
bucket           1325 kernel/dma/debug.c 	put_hash_bucket(bucket, &flags);
bucket           1388 kernel/dma/debug.c 	struct hash_bucket *bucket;
bucket           1392 kernel/dma/debug.c 	bucket       = get_hash_bucket(ref, &flags);
bucket           1393 kernel/dma/debug.c 	entry        = bucket_find_exact(bucket, ref);
bucket           1398 kernel/dma/debug.c 	put_hash_bucket(bucket, &flags);
bucket            859 kernel/sched/core.c 	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
bucket            867 kernel/sched/core.c 		if (!bucket[bucket_id].tasks)
bucket            869 kernel/sched/core.c 		return bucket[bucket_id].value;
bucket            949 kernel/sched/core.c 	struct uclamp_bucket *bucket;
bucket            956 kernel/sched/core.c 	bucket = &uc_rq->bucket[uc_se->bucket_id];
bucket            957 kernel/sched/core.c 	bucket->tasks++;
bucket            966 kernel/sched/core.c 	if (bucket->tasks == 1 || uc_se->value > bucket->value)
bucket            967 kernel/sched/core.c 		bucket->value = uc_se->value;
bucket            987 kernel/sched/core.c 	struct uclamp_bucket *bucket;
bucket            993 kernel/sched/core.c 	bucket = &uc_rq->bucket[uc_se->bucket_id];
bucket            994 kernel/sched/core.c 	SCHED_WARN_ON(!bucket->tasks);
bucket            995 kernel/sched/core.c 	if (likely(bucket->tasks))
bucket            996 kernel/sched/core.c 		bucket->tasks--;
bucket           1005 kernel/sched/core.c 	if (likely(bucket->tasks))
bucket           1013 kernel/sched/core.c 	SCHED_WARN_ON(bucket->value > rq_clamp);
bucket           1014 kernel/sched/core.c 	if (bucket->value >= rq_clamp) {
bucket            842 kernel/sched/sched.h 	struct uclamp_bucket bucket[UCLAMP_BUCKETS];
bucket             34 lib/rhashtable.c 	struct rhash_lock_head *bucket;
bucket            128 lib/rhashtable.c 			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
bucket           1191 lib/rhashtable.c 	return &ntbl[subhash].bucket;
bucket           1232 lib/rhashtable.c 	return &ntbl[hash].bucket;
bucket            177 lib/stackdepot.c static inline struct stack_record *find_stack(struct stack_record *bucket,
bucket            183 lib/stackdepot.c 	for (found = bucket; found; found = found->next) {
bucket            227 lib/stackdepot.c 	struct stack_record *found = NULL, **bucket;
bucket            238 lib/stackdepot.c 	bucket = &stack_table[hash & STACK_HASH_MASK];
bucket            245 lib/stackdepot.c 	found = find_stack(smp_load_acquire(bucket), entries,
bucket            274 lib/stackdepot.c 	found = find_stack(*bucket, entries, nr_entries, hash);
bucket            280 lib/stackdepot.c 			new->next = *bucket;
bucket            285 lib/stackdepot.c 			smp_store_release(bucket, new);
bucket            181 net/9p/error.c 	int bucket;
bucket            184 net/9p/error.c 	for (bucket = 0; bucket < ERRHASHSZ; bucket++)
bucket            185 net/9p/error.c 		INIT_HLIST_HEAD(&hash_errmap[bucket]);
bucket            190 net/9p/error.c 		bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
bucket            192 net/9p/error.c 		hlist_add_head(&c->list, &hash_errmap[bucket]);
bucket            210 net/9p/error.c 	int bucket;
bucket            214 net/9p/error.c 	bucket = jhash(errstr, len, 0) % ERRHASHSZ;
bucket            215 net/9p/error.c 	hlist_for_each_entry(c, &hash_errmap[bucket], list) {
bucket            921 net/appletalk/aarp.c 	int ct = iter->bucket;
bucket            931 net/appletalk/aarp.c 				iter->bucket = ct;
bucket            958 net/appletalk/aarp.c 	iter->bucket    = 0;
bucket            980 net/appletalk/aarp.c 		++iter->bucket;
bucket             69 net/atm/proc.c 	int bucket;
bucket             78 net/atm/proc.c static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)
bucket             83 net/atm/proc.c 		for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {
bucket             84 net/atm/proc.c 			struct hlist_head *head = &vcc_hash[*bucket];
bucket             98 net/atm/proc.c 	if (!sk && ++*bucket < VCC_HTABLE_SIZE) {
bucket             99 net/atm/proc.c 		sk = sk_head(&vcc_hash[*bucket]);
bucket            113 net/atm/proc.c 	return __vcc_walk(&state->sk, family, &state->bucket, l) ?
bucket           2088 net/batman-adv/bat_iv_ogm.c 	int bucket = cb->args[0];
bucket           2093 net/batman-adv/bat_iv_ogm.c 	while (bucket < hash->size) {
bucket           2094 net/batman-adv/bat_iv_ogm.c 		head = &hash->table[bucket];
bucket           2102 net/batman-adv/bat_iv_ogm.c 		bucket++;
bucket           2105 net/batman-adv/bat_iv_ogm.c 	cb->args[0] = bucket;
bucket            583 net/batman-adv/bat_v.c 	int bucket = cb->args[0];
bucket            588 net/batman-adv/bat_v.c 	while (bucket < hash->size) {
bucket            589 net/batman-adv/bat_v.c 		head = &hash->table[bucket];
bucket            597 net/batman-adv/bat_v.c 		bucket++;
bucket            600 net/batman-adv/bat_v.c 	cb->args[0] = bucket;
bucket           2169 net/batman-adv/bridge_loop_avoidance.c 			     struct batadv_hashtable *hash, unsigned int bucket,
bucket           2176 net/batman-adv/bridge_loop_avoidance.c 	spin_lock_bh(&hash->list_locks[bucket]);
bucket           2179 net/batman-adv/bridge_loop_avoidance.c 	hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
bucket           2193 net/batman-adv/bridge_loop_avoidance.c 	spin_unlock_bh(&hash->list_locks[bucket]);
bucket           2212 net/batman-adv/bridge_loop_avoidance.c 	int bucket = cb->args[0];
bucket           2237 net/batman-adv/bridge_loop_avoidance.c 	while (bucket < hash->size) {
bucket           2239 net/batman-adv/bridge_loop_avoidance.c 						 hash, bucket, &idx))
bucket           2241 net/batman-adv/bridge_loop_avoidance.c 		bucket++;
bucket           2244 net/batman-adv/bridge_loop_avoidance.c 	cb->args[0] = bucket;
bucket           2408 net/batman-adv/bridge_loop_avoidance.c 				unsigned int bucket, int *idx_skip)
bucket           2414 net/batman-adv/bridge_loop_avoidance.c 	spin_lock_bh(&hash->list_locks[bucket]);
bucket           2417 net/batman-adv/bridge_loop_avoidance.c 	hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
bucket           2431 net/batman-adv/bridge_loop_avoidance.c 	spin_unlock_bh(&hash->list_locks[bucket]);
bucket           2450 net/batman-adv/bridge_loop_avoidance.c 	int bucket = cb->args[0];
bucket           2475 net/batman-adv/bridge_loop_avoidance.c 	while (bucket < hash->size) {
bucket           2477 net/batman-adv/bridge_loop_avoidance.c 						    hash, bucket, &idx))
bucket           2479 net/batman-adv/bridge_loop_avoidance.c 		bucket++;
bucket           2482 net/batman-adv/bridge_loop_avoidance.c 	cb->args[0] = bucket;
bucket            954 net/batman-adv/distributed-arp-table.c 			     struct batadv_hashtable *hash, unsigned int bucket,
bucket            960 net/batman-adv/distributed-arp-table.c 	spin_lock_bh(&hash->list_locks[bucket]);
bucket            963 net/batman-adv/distributed-arp-table.c 	hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) {
bucket            968 net/batman-adv/distributed-arp-table.c 			spin_unlock_bh(&hash->list_locks[bucket]);
bucket            977 net/batman-adv/distributed-arp-table.c 	spin_unlock_bh(&hash->list_locks[bucket]);
bucket            997 net/batman-adv/distributed-arp-table.c 	int bucket = cb->args[0];
bucket           1022 net/batman-adv/distributed-arp-table.c 	while (bucket < hash->size) {
bucket           1023 net/batman-adv/distributed-arp-table.c 		if (batadv_dat_cache_dump_bucket(msg, portid, cb, hash, bucket,
bucket           1027 net/batman-adv/distributed-arp-table.c 		bucket++;
bucket           1031 net/batman-adv/distributed-arp-table.c 	cb->args[0] = bucket;
bucket            146 net/batman-adv/fragmentation.c 	u8 bucket;
bucket            159 net/batman-adv/fragmentation.c 	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
bucket            172 net/batman-adv/fragmentation.c 	chain = &orig_node->fragments[bucket];
bucket           2253 net/batman-adv/multicast.c 			       unsigned int bucket, long *idx_skip)
bucket           2258 net/batman-adv/multicast.c 	spin_lock_bh(&hash->list_locks[bucket]);
bucket           2261 net/batman-adv/multicast.c 	hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
bucket           2270 net/batman-adv/multicast.c 			spin_unlock_bh(&hash->list_locks[bucket]);
bucket           2279 net/batman-adv/multicast.c 	spin_unlock_bh(&hash->list_locks[bucket]);
bucket           2298 net/batman-adv/multicast.c 			  struct batadv_priv *bat_priv, long *bucket, long *idx)
bucket           2301 net/batman-adv/multicast.c 	long bucket_tmp = *bucket;
bucket           2313 net/batman-adv/multicast.c 	*bucket = bucket_tmp;
bucket           2380 net/batman-adv/multicast.c 	long *bucket = &cb->args[0];
bucket           2389 net/batman-adv/multicast.c 	ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
bucket           1218 net/batman-adv/translation-table.c 			    struct batadv_hashtable *hash, unsigned int bucket,
bucket           1224 net/batman-adv/translation-table.c 	spin_lock_bh(&hash->list_locks[bucket]);
bucket           1227 net/batman-adv/translation-table.c 	hlist_for_each_entry(common, &hash->table[bucket], hash_entry) {
bucket           1233 net/batman-adv/translation-table.c 			spin_unlock_bh(&hash->list_locks[bucket]);
bucket           1238 net/batman-adv/translation-table.c 	spin_unlock_bh(&hash->list_locks[bucket]);
bucket           1260 net/batman-adv/translation-table.c 	int bucket = cb->args[0];
bucket           1284 net/batman-adv/translation-table.c 	while (bucket < hash->size) {
bucket           1286 net/batman-adv/translation-table.c 						hash, bucket, &idx))
bucket           1289 net/batman-adv/translation-table.c 		bucket++;
bucket           1300 net/batman-adv/translation-table.c 	cb->args[0] = bucket;
bucket           2177 net/batman-adv/translation-table.c 	int bucket = cb->args[0];
bucket           2202 net/batman-adv/translation-table.c 	while (bucket < hash->size) {
bucket           2203 net/batman-adv/translation-table.c 		head = &hash->table[bucket];
bucket           2210 net/batman-adv/translation-table.c 		bucket++;
bucket           2221 net/batman-adv/translation-table.c 	cb->args[0] = bucket;
bucket             74 net/ceph/crush/mapper.c static int bucket_perm_choose(const struct crush_bucket *bucket,
bucket             78 net/ceph/crush/mapper.c 	unsigned int pr = r % bucket->size;
bucket             83 net/ceph/crush/mapper.c 		dprintk("bucket %d new x=%d\n", bucket->id, x);
bucket             88 net/ceph/crush/mapper.c 			s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %
bucket             89 net/ceph/crush/mapper.c 				bucket->size;
bucket             95 net/ceph/crush/mapper.c 		for (i = 0; i < bucket->size; i++)
bucket            100 net/ceph/crush/mapper.c 		for (i = 1; i < bucket->size; i++)
bucket            112 net/ceph/crush/mapper.c 		if (p < bucket->size - 1) {
bucket            113 net/ceph/crush/mapper.c 			i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
bucket            114 net/ceph/crush/mapper.c 				(bucket->size - p);
bucket            124 net/ceph/crush/mapper.c 	for (i = 0; i < bucket->size; i++)
bucket            129 net/ceph/crush/mapper.c 	dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id,
bucket            130 net/ceph/crush/mapper.c 		bucket->size, x, r, pr, s);
bucket            131 net/ceph/crush/mapper.c 	return bucket->items[s];
bucket            135 net/ceph/crush/mapper.c static int bucket_uniform_choose(const struct crush_bucket_uniform *bucket,
bucket            138 net/ceph/crush/mapper.c 	return bucket_perm_choose(&bucket->h, work, x, r);
bucket            142 net/ceph/crush/mapper.c static int bucket_list_choose(const struct crush_bucket_list *bucket,
bucket            147 net/ceph/crush/mapper.c 	for (i = bucket->h.size-1; i >= 0; i--) {
bucket            148 net/ceph/crush/mapper.c 		__u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i],
bucket            149 net/ceph/crush/mapper.c 					 r, bucket->h.id);
bucket            153 net/ceph/crush/mapper.c 			i, x, r, bucket->h.items[i], bucket->item_weights[i],
bucket            154 net/ceph/crush/mapper.c 			bucket->sum_weights[i], w);
bucket            155 net/ceph/crush/mapper.c 		w *= bucket->sum_weights[i];
bucket            158 net/ceph/crush/mapper.c 		if (w < bucket->item_weights[i]) {
bucket            159 net/ceph/crush/mapper.c 			return bucket->h.items[i];
bucket            163 net/ceph/crush/mapper.c 	dprintk("bad list sums for bucket %d\n", bucket->h.id);
bucket            164 net/ceph/crush/mapper.c 	return bucket->h.items[0];
bucket            196 net/ceph/crush/mapper.c static int bucket_tree_choose(const struct crush_bucket_tree *bucket,
bucket            204 net/ceph/crush/mapper.c 	n = bucket->num_nodes >> 1;
bucket            209 net/ceph/crush/mapper.c 		w = bucket->node_weights[n];
bucket            210 net/ceph/crush/mapper.c 		t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r,
bucket            211 net/ceph/crush/mapper.c 					  bucket->h.id) * (__u64)w;
bucket            216 net/ceph/crush/mapper.c 		if (t < bucket->node_weights[l])
bucket            222 net/ceph/crush/mapper.c 	return bucket->h.items[n >> 1];
bucket            228 net/ceph/crush/mapper.c static int bucket_straw_choose(const struct crush_bucket_straw *bucket,
bucket            236 net/ceph/crush/mapper.c 	for (i = 0; i < bucket->h.size; i++) {
bucket            237 net/ceph/crush/mapper.c 		draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);
bucket            239 net/ceph/crush/mapper.c 		draw *= bucket->straws[i];
bucket            245 net/ceph/crush/mapper.c 	return bucket->h.items[high];
bucket            305 net/ceph/crush/mapper.c static __u32 *get_choose_arg_weights(const struct crush_bucket_straw2 *bucket,
bucket            310 net/ceph/crush/mapper.c 		return bucket->item_weights;
bucket            317 net/ceph/crush/mapper.c static __s32 *get_choose_arg_ids(const struct crush_bucket_straw2 *bucket,
bucket            321 net/ceph/crush/mapper.c 		return bucket->h.items;
bucket            326 net/ceph/crush/mapper.c static int bucket_straw2_choose(const struct crush_bucket_straw2 *bucket,
bucket            334 net/ceph/crush/mapper.c 	__u32 *weights = get_choose_arg_weights(bucket, arg, position);
bucket            335 net/ceph/crush/mapper.c 	__s32 *ids = get_choose_arg_ids(bucket, arg);
bucket            337 net/ceph/crush/mapper.c 	for (i = 0; i < bucket->h.size; i++) {
bucket            340 net/ceph/crush/mapper.c 			u = crush_hash32_3(bucket->h.hash, x, ids[i], r);
bucket            372 net/ceph/crush/mapper.c 	return bucket->h.items[high];
bucket            451 net/ceph/crush/mapper.c 			       const struct crush_bucket *bucket,
bucket            470 net/ceph/crush/mapper.c 	const struct crush_bucket *in = bucket;
bucket            480 net/ceph/crush/mapper.c 		bucket->id, x, outpos, numrep,
bucket            490 net/ceph/crush/mapper.c 			in = bucket;               /* initial bucket */
bucket            645 net/ceph/crush/mapper.c 			       const struct crush_bucket *bucket,
bucket            656 net/ceph/crush/mapper.c 	const struct crush_bucket *in = bucket;
bucket            667 net/ceph/crush/mapper.c 		bucket->id, x, outpos, numrep);
bucket            695 net/ceph/crush/mapper.c 			in = bucket;  /* initial bucket */
bucket             49 net/core/bpf_sk_storage.c 	struct bucket *buckets;
bucket             93 net/core/bpf_sk_storage.c static struct bucket *select_bucket(struct bpf_sk_storage_map *smap,
bucket            225 net/core/bpf_sk_storage.c 	struct bucket *b;
bucket            242 net/core/bpf_sk_storage.c 	struct bucket *b = select_bucket(smap, selem);
bucket            557 net/core/bpf_sk_storage.c 	struct bucket *b;
bucket           3033 net/core/neighbour.c 	int bucket;
bucket           3036 net/core/neighbour.c 	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
bucket           3037 net/core/neighbour.c 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
bucket           3061 net/core/neighbour.c 	state->bucket = bucket;
bucket           3103 net/core/neighbour.c 		if (++state->bucket >= (1 << nht->hash_shift))
bucket           3106 net/core/neighbour.c 		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
bucket           3135 net/core/neighbour.c 	int bucket = state->bucket;
bucket           3138 net/core/neighbour.c 	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
bucket           3139 net/core/neighbour.c 		pn = tbl->phash_buckets[bucket];
bucket           3145 net/core/neighbour.c 	state->bucket = bucket;
bucket           3163 net/core/neighbour.c 		if (++state->bucket > PNEIGH_HASHMASK)
bucket           3165 net/core/neighbour.c 		pn = tbl->phash_buckets[state->bucket];
bucket           3213 net/core/neighbour.c 	state->bucket = 0;
bucket             35 net/core/net-procfs.c 	unsigned int bucket;
bucket             42 net/core/net-procfs.c 		bucket = get_bucket(*pos) + 1;
bucket             43 net/core/net-procfs.c 		*pos = set_bucket_offset(bucket, 1);
bucket             44 net/core/net-procfs.c 	} while (bucket < NETDEV_HASHENTRIES);
bucket            570 net/core/sock_map.c 	struct bpf_htab_bucket *bucket;
bucket            576 net/core/sock_map.c 	bucket = sock_hash_select_bucket(htab, hash);
bucket            577 net/core/sock_map.c 	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
bucket            594 net/core/sock_map.c 	struct bpf_htab_bucket *bucket;
bucket            597 net/core/sock_map.c 	bucket = sock_hash_select_bucket(htab, elem->hash);
bucket            603 net/core/sock_map.c 	raw_spin_lock_bh(&bucket->lock);
bucket            604 net/core/sock_map.c 	elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
bucket            611 net/core/sock_map.c 	raw_spin_unlock_bh(&bucket->lock);
bucket            618 net/core/sock_map.c 	struct bpf_htab_bucket *bucket;
bucket            623 net/core/sock_map.c 	bucket = sock_hash_select_bucket(htab, hash);
bucket            625 net/core/sock_map.c 	raw_spin_lock_bh(&bucket->lock);
bucket            626 net/core/sock_map.c 	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
bucket            633 net/core/sock_map.c 	raw_spin_unlock_bh(&bucket->lock);
bucket            670 net/core/sock_map.c 	struct bpf_htab_bucket *bucket;
bucket            693 net/core/sock_map.c 	bucket = sock_hash_select_bucket(htab, hash);
bucket            695 net/core/sock_map.c 	raw_spin_lock_bh(&bucket->lock);
bucket            696 net/core/sock_map.c 	elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
bucket            715 net/core/sock_map.c 	hlist_add_head_rcu(&elem_new->node, &bucket->head);
bucket            721 net/core/sock_map.c 	raw_spin_unlock_bh(&bucket->lock);
bucket            724 net/core/sock_map.c 	raw_spin_unlock_bh(&bucket->lock);
bucket            863 net/core/sock_map.c 	struct bpf_htab_bucket *bucket;
bucket            874 net/core/sock_map.c 		bucket = sock_hash_select_bucket(htab, i);
bucket            875 net/core/sock_map.c 		hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
bucket           2112 net/decnet/af_decnet.c 	int bucket;
bucket           2120 net/decnet/af_decnet.c 	for(state->bucket = 0;
bucket           2121 net/decnet/af_decnet.c 	    state->bucket < DN_SK_HASH_SIZE;
bucket           2122 net/decnet/af_decnet.c 	    ++state->bucket) {
bucket           2123 net/decnet/af_decnet.c 		n = sk_head(&dn_sk_hash[state->bucket]);
bucket           2140 net/decnet/af_decnet.c 	if (++state->bucket >= DN_SK_HASH_SIZE)
bucket           2142 net/decnet/af_decnet.c 	n = sk_head(&dn_sk_hash[state->bucket]);
bucket           1772 net/decnet/dn_route.c 	int bucket;
bucket           1780 net/decnet/dn_route.c 	for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
bucket           1782 net/decnet/dn_route.c 		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
bucket           1797 net/decnet/dn_route.c 		if (--s->bucket < 0)
bucket           1800 net/decnet/dn_route.c 		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
bucket           1876 net/ipv4/fib_semantics.c 	struct fnhe_hash_bucket *bucket;
bucket           1879 net/ipv4/fib_semantics.c 	bucket = rcu_dereference_protected(nhc->nhc_exceptions, 1);
bucket           1880 net/ipv4/fib_semantics.c 	if (!bucket)
bucket           1886 net/ipv4/fib_semantics.c 		for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
bucket           1017 net/ipv4/ping.c 	for (state->bucket = start; state->bucket < PING_HTABLE_SIZE;
bucket           1018 net/ipv4/ping.c 	     ++state->bucket) {
bucket           1022 net/ipv4/ping.c 		hslot = &ping_table.hash[state->bucket];
bucket           1048 net/ipv4/ping.c 		return ping_get_first(seq, state->bucket + 1);
bucket           1066 net/ipv4/ping.c 	state->bucket = 0;
bucket           1102 net/ipv4/ping.c 		int bucket)
bucket           1112 net/ipv4/ping.c 		bucket, src, srcp, dest, destp, sp->sk_state,
bucket           1132 net/ipv4/ping.c 		ping_v4_format_sock(v, seq, state->bucket);
bucket            997 net/ipv4/raw.c 	for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
bucket            998 net/ipv4/raw.c 			++state->bucket) {
bucket            999 net/ipv4/raw.c 		sk_for_each(sk, &h->ht[state->bucket])
bucket           1019 net/ipv4/raw.c 	if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
bucket           1020 net/ipv4/raw.c 		sk = sk_head(&h->ht[state->bucket]);
bucket           1092 net/ipv4/raw.c 		raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
bucket           2872 net/ipv4/route.c 			    struct fnhe_hash_bucket *bucket, int genid,
bucket           2880 net/ipv4/route.c 		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
bucket           2924 net/ipv4/route.c 		struct fnhe_hash_bucket *bucket;
bucket           2931 net/ipv4/route.c 		bucket = rcu_dereference(nhc->nhc_exceptions);
bucket           2933 net/ipv4/route.c 		if (bucket)
bucket           2934 net/ipv4/route.c 			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
bucket           2157 net/ipv4/tcp_ipv4.c 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
bucket           2163 net/ipv4/tcp_ipv4.c 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
bucket           2177 net/ipv4/tcp_ipv4.c 	if (++st->bucket < INET_LHTABLE_SIZE)
bucket           2187 net/ipv4/tcp_ipv4.c 	st->bucket = 0;
bucket           2200 net/ipv4/tcp_ipv4.c 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
bucket           2215 net/ipv4/tcp_ipv4.c 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
bucket           2218 net/ipv4/tcp_ipv4.c 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
bucket           2225 net/ipv4/tcp_ipv4.c 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
bucket           2258 net/ipv4/tcp_ipv4.c 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
bucket           2259 net/ipv4/tcp_ipv4.c 	++st->bucket;
bucket           2268 net/ipv4/tcp_ipv4.c 	st->bucket = 0;
bucket           2303 net/ipv4/tcp_ipv4.c 		if (st->bucket >= INET_LHTABLE_SIZE)
bucket           2311 net/ipv4/tcp_ipv4.c 		st->bucket = 0;
bucket           2315 net/ipv4/tcp_ipv4.c 		if (st->bucket > tcp_hashinfo.ehash_mask)
bucket           2340 net/ipv4/tcp_ipv4.c 	st->bucket = 0;
bucket           2365 net/ipv4/tcp_ipv4.c 			st->bucket = 0;
bucket           2388 net/ipv4/tcp_ipv4.c 			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
bucket           2392 net/ipv4/tcp_ipv4.c 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
bucket           2819 net/ipv4/udp.c 	for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
bucket           2820 net/ipv4/udp.c 	     ++state->bucket) {
bucket           2821 net/ipv4/udp.c 		struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];
bucket           2851 net/ipv4/udp.c 		if (state->bucket <= afinfo->udp_table->mask)
bucket           2852 net/ipv4/udp.c 			spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
bucket           2853 net/ipv4/udp.c 		return udp_get_first(seq, state->bucket + 1);
bucket           2871 net/ipv4/udp.c 	state->bucket = MAX_UDP_PORTS;
bucket           2896 net/ipv4/udp.c 	if (state->bucket <= afinfo->udp_table->mask)
bucket           2897 net/ipv4/udp.c 		spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
bucket           2903 net/ipv4/udp.c 		int bucket)
bucket           2913 net/ipv4/udp.c 		bucket, src, srcp, dest, destp, sp->sk_state,
bucket           2933 net/ipv4/udp.c 		udp4_format_sock(v, seq, state->bucket);
bucket           4250 net/ipv6/addrconf.c 	int bucket;
bucket           4263 net/ipv6/addrconf.c 		state->bucket = 0;
bucket           4267 net/ipv6/addrconf.c 	for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
bucket           4268 net/ipv6/addrconf.c 		hlist_for_each_entry_rcu(ifa, &inet6_addr_lst[state->bucket],
bucket           4301 net/ipv6/addrconf.c 	while (++state->bucket < IN6_ADDR_HSIZE) {
bucket           4303 net/ipv6/addrconf.c 				     &inet6_addr_lst[state->bucket], addr_lst) {
bucket           1027 net/ipv6/datagram.c 			       __u16 srcp, __u16 destp, int rqueue, int bucket)
bucket           1036 net/ipv6/datagram.c 		   bucket,
bucket            722 net/ipv6/ip6_flowlabel.c 	int bucket;
bucket            733 net/ipv6/ip6_flowlabel.c 	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
bucket            734 net/ipv6/ip6_flowlabel.c 		for_each_fl_rcu(state->bucket, fl) {
bucket            755 net/ipv6/ip6_flowlabel.c 	if (++state->bucket <= FL_HASH_MASK) {
bucket            756 net/ipv6/ip6_flowlabel.c 		for_each_fl_rcu(state->bucket, fl) {
bucket            202 net/ipv6/ping.c 		int bucket = ((struct ping_iter_state *) seq->private)->bucket;
bucket            206 net/ipv6/ping.c 		ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
bucket           1316 net/ipv6/raw.c 					raw_seq_private(seq)->bucket);
bucket           1456 net/ipv6/route.c static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
bucket           1462 net/ipv6/route.c 	if (!bucket || !rt6_ex)
bucket           1478 net/ipv6/route.c 	WARN_ON_ONCE(!bucket->depth);
bucket           1479 net/ipv6/route.c 	bucket->depth--;
bucket           1485 net/ipv6/route.c static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
bucket           1489 net/ipv6/route.c 	if (!bucket)
bucket           1492 net/ipv6/route.c 	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
bucket           1496 net/ipv6/route.c 	rt6_remove_exception(bucket, oldest);
bucket           1521 net/ipv6/route.c __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
bucket           1528 net/ipv6/route.c 	if (!(*bucket) || !daddr)
bucket           1532 net/ipv6/route.c 	*bucket += hval;
bucket           1534 net/ipv6/route.c 	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
bucket           1554 net/ipv6/route.c __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
bucket           1563 net/ipv6/route.c 	if (!(*bucket) || !daddr)
bucket           1567 net/ipv6/route.c 	*bucket += hval;
bucket           1569 net/ipv6/route.c 	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
bucket           1616 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           1619 net/ipv6/route.c 		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
bucket           1622 net/ipv6/route.c 		bucket = rcu_dereference(nh->rt6i_exception_bucket);
bucket           1625 net/ipv6/route.c 	if (bucket) {
bucket           1626 net/ipv6/route.c 		unsigned long p = (unsigned long)bucket;
bucket           1629 net/ipv6/route.c 		bucket = (struct rt6_exception_bucket *)p;
bucket           1632 net/ipv6/route.c 	return bucket;
bucket           1635 net/ipv6/route.c static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
bucket           1637 net/ipv6/route.c 	unsigned long p = (unsigned long)bucket;
bucket           1646 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           1649 net/ipv6/route.c 	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
bucket           1652 net/ipv6/route.c 	p = (unsigned long)bucket;
bucket           1654 net/ipv6/route.c 	bucket = (struct rt6_exception_bucket *)p;
bucket           1655 net/ipv6/route.c 	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
bucket           1662 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           1671 net/ipv6/route.c 	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
bucket           1673 net/ipv6/route.c 	if (!bucket) {
bucket           1674 net/ipv6/route.c 		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
bucket           1676 net/ipv6/route.c 		if (!bucket) {
bucket           1680 net/ipv6/route.c 		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
bucket           1681 net/ipv6/route.c 	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
bucket           1705 net/ipv6/route.c 	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
bucket           1708 net/ipv6/route.c 		rt6_remove_exception(bucket, rt6_ex);
bucket           1717 net/ipv6/route.c 	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
bucket           1718 net/ipv6/route.c 	bucket->depth++;
bucket           1721 net/ipv6/route.c 	if (bucket->depth > FIB6_MAX_DEPTH)
bucket           1722 net/ipv6/route.c 		rt6_exception_remove_oldest(bucket);
bucket           1740 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           1747 net/ipv6/route.c 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
bucket           1748 net/ipv6/route.c 	if (!bucket)
bucket           1756 net/ipv6/route.c 		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
bucket           1759 net/ipv6/route.c 				rt6_remove_exception(bucket, rt6_ex);
bucket           1761 net/ipv6/route.c 		WARN_ON_ONCE(!from && bucket->depth);
bucket           1762 net/ipv6/route.c 		bucket++;
bucket           1794 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           1814 net/ipv6/route.c 	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
bucket           1815 net/ipv6/route.c 	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
bucket           1836 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           1844 net/ipv6/route.c 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
bucket           1856 net/ipv6/route.c 	rt6_ex = __rt6_find_exception_spinlock(&bucket,
bucket           1860 net/ipv6/route.c 		rt6_remove_exception(bucket, rt6_ex);
bucket           1920 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           1923 net/ipv6/route.c 	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
bucket           1934 net/ipv6/route.c 	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
bucket           2017 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           2021 net/ipv6/route.c 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
bucket           2022 net/ipv6/route.c 	if (!bucket)
bucket           2026 net/ipv6/route.c 		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
bucket           2037 net/ipv6/route.c 		bucket++;
bucket           2046 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           2055 net/ipv6/route.c 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
bucket           2056 net/ipv6/route.c 	if (bucket) {
bucket           2059 net/ipv6/route.c 						  &bucket->chain, hlist) {
bucket           2066 net/ipv6/route.c 					rt6_remove_exception(bucket, rt6_ex);
bucket           2069 net/ipv6/route.c 			bucket++;
bucket           2076 net/ipv6/route.c static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
bucket           2092 net/ipv6/route.c 			rt6_remove_exception(bucket, rt6_ex);
bucket           2097 net/ipv6/route.c 		rt6_remove_exception(bucket, rt6_ex);
bucket           2112 net/ipv6/route.c 			rt6_remove_exception(bucket, rt6_ex);
bucket           2124 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           2134 net/ipv6/route.c 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
bucket           2135 net/ipv6/route.c 	if (bucket) {
bucket           2138 net/ipv6/route.c 						  &bucket->chain, hlist) {
bucket           2139 net/ipv6/route.c 				rt6_age_examine_exception(bucket, rt6_ex,
bucket           2142 net/ipv6/route.c 			bucket++;
bucket           3539 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           3544 net/ipv6/route.c 	bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
bucket           3545 net/ipv6/route.c 	if (bucket) {
bucket           3547 net/ipv6/route.c 		kfree(bucket);
bucket           5614 net/ipv6/route.c 	struct rt6_exception_bucket *bucket;
bucket           5618 net/ipv6/route.c 	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
bucket           5619 net/ipv6/route.c 	if (!bucket)
bucket           5623 net/ipv6/route.c 		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
bucket           5655 net/ipv6/route.c 		bucket++;
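
The net/ipv6/route.c hits above all belong to the per-nexthop route exception cache: exceptions are hashed into a fixed array of buckets, each bucket keeps a chain plus a depth counter, new entries go on the head of the chain, the oldest entry is evicted once the depth passes FIB6_MAX_DEPTH, and the flush paths simply walk the array with bucket++. A minimal userspace sketch of that insert/evict/flush pattern (struct exc_bucket, exc_insert() and the NBUCKETS/MAX_DEPTH constants are invented here, not the kernel's types):

    #include <stdlib.h>

    #define NBUCKETS  64   /* fixed-size bucket array, in the spirit of the kernel's exception table */
    #define MAX_DEPTH 5    /* per-chain cap, in the spirit of FIB6_MAX_DEPTH */

    struct exc_entry {
        struct exc_entry *next;
        unsigned int key;
    };

    struct exc_bucket {
        struct exc_entry *chain;   /* singly linked chain of cached exceptions */
        int depth;                 /* number of entries currently on the chain */
    };

    /* Drop the entry at the tail of the chain, i.e. the oldest one. */
    static void exc_remove_oldest(struct exc_bucket *b)
    {
        struct exc_entry **pp = &b->chain;

        if (!*pp)
            return;
        while ((*pp)->next)
            pp = &(*pp)->next;
        free(*pp);
        *pp = NULL;
        b->depth--;
    }

    /* Hash the key to a bucket, add at the head, evict if the chain got too deep. */
    static int exc_insert(struct exc_bucket *tbl, unsigned int key)
    {
        struct exc_bucket *b = &tbl[key % NBUCKETS];
        struct exc_entry *e = malloc(sizeof(*e));

        if (!e)
            return -1;
        e->key = key;
        e->next = b->chain;
        b->chain = e;
        b->depth++;

        if (b->depth > MAX_DEPTH)
            exc_remove_oldest(b);
        return 0;
    }

    /* Flush everything, walking the bucket array with a plain bucket++. */
    static void exc_flush(struct exc_bucket *tbl)
    {
        struct exc_bucket *bucket = tbl;
        int i;

        for (i = 0; i < NBUCKETS; i++, bucket++) {
            while (bucket->chain) {
                struct exc_entry *e = bucket->chain;

                bucket->chain = e->next;
                free(e);
            }
            bucket->depth = 0;
        }
    }
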
bucket           1615 net/ipv6/udp.c 		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
bucket           1620 net/ipv6/udp.c 					  udp_rqueue_get(v), bucket);
bucket             67 net/llc/llc_proc.c static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket)
bucket             72 net/llc/llc_proc.c 	while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)
bucket             73 net/llc/llc_proc.c 		sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
bucket            172 net/mac80211/mesh.c 		INIT_HLIST_HEAD(&sdata->u.mesh.rmc->bucket[i]);
bucket            187 net/mac80211/mesh.c 		hlist_for_each_entry_safe(p, n, &rmc->bucket[i], list) {
bucket            226 net/mac80211/mesh.c 	hlist_for_each_entry_safe(p, n, &rmc->bucket[idx], list) {
bucket            244 net/mac80211/mesh.c 	hlist_add_head(&p->list, &rmc->bucket[idx]);
bucket            176 net/mac80211/mesh.h 	struct hlist_head bucket[RMC_BUCKETS];
bucket            112 net/netfilter/ipset/ip_set_hash_gen.h 	struct hbucket __rcu *bucket[0]; /* hashtable buckets */
bucket            115 net/netfilter/ipset/ip_set_hash_gen.h #define hbucket(h, i)		((h)->bucket[i])
bucket           1996 net/netfilter/ipvs/ip_vs_ctl.c 	int bucket;
bucket           2032 net/netfilter/ipvs/ip_vs_ctl.c 				iter->bucket = idx;
bucket           2044 net/netfilter/ipvs/ip_vs_ctl.c 				iter->bucket = idx;
bucket           2080 net/netfilter/ipvs/ip_vs_ctl.c 		while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
bucket           2082 net/netfilter/ipvs/ip_vs_ctl.c 						 &ip_vs_svc_table[iter->bucket],
bucket           2089 net/netfilter/ipvs/ip_vs_ctl.c 		iter->bucket = -1;
bucket           2099 net/netfilter/ipvs/ip_vs_ctl.c 	while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
bucket           2101 net/netfilter/ipvs/ip_vs_ctl.c 					 &ip_vs_svc_fwm_table[iter->bucket],
bucket            103 net/netfilter/ipvs/ip_vs_lblc.c 	struct hlist_head	bucket[IP_VS_LBLC_TAB_SIZE];  /* hash bucket */
bucket            172 net/netfilter/ipvs/ip_vs_lblc.c 	hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
bucket            185 net/netfilter/ipvs/ip_vs_lblc.c 	hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
bucket            239 net/netfilter/ipvs/ip_vs_lblc.c 		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
bucket            268 net/netfilter/ipvs/ip_vs_lblc.c 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
bucket            324 net/netfilter/ipvs/ip_vs_lblc.c 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
bucket            363 net/netfilter/ipvs/ip_vs_lblc.c 		INIT_HLIST_HEAD(&tbl->bucket[i]);
bucket            273 net/netfilter/ipvs/ip_vs_lblcr.c 	struct hlist_head	bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
bucket            335 net/netfilter/ipvs/ip_vs_lblcr.c 	hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
bucket            348 net/netfilter/ipvs/ip_vs_lblcr.c 	hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
bucket            405 net/netfilter/ipvs/ip_vs_lblcr.c 		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
bucket            433 net/netfilter/ipvs/ip_vs_lblcr.c 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
bucket            488 net/netfilter/ipvs/ip_vs_lblcr.c 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
bucket            526 net/netfilter/ipvs/ip_vs_lblcr.c 		INIT_HLIST_HEAD(&tbl->bucket[i]);
bucket            736 net/netfilter/nf_conntrack_core.c 	unsigned int bucket, hsize;
bucket            740 net/netfilter/nf_conntrack_core.c 	bucket = reciprocal_scale(hash, hsize);
bucket            742 net/netfilter/nf_conntrack_core.c 	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
bucket            759 net/netfilter/nf_conntrack_core.c 	if (get_nulls_value(n) != bucket) {
bucket           1165 net/netfilter/nf_conntrack_core.c 	unsigned int i, bucket;
bucket           1174 net/netfilter/nf_conntrack_core.c 			bucket = reciprocal_scale(hash, hsize);
bucket           1176 net/netfilter/nf_conntrack_core.c 			bucket = (bucket + 1) % hsize;
bucket           1178 net/netfilter/nf_conntrack_core.c 		drops = early_drop_list(net, &ct_hash[bucket]);
bucket           2058 net/netfilter/nf_conntrack_core.c 		void *data, unsigned int *bucket)
bucket           2065 net/netfilter/nf_conntrack_core.c 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
bucket           2066 net/netfilter/nf_conntrack_core.c 		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
bucket           2069 net/netfilter/nf_conntrack_core.c 		if (*bucket < nf_conntrack_htable_size) {
bucket           2070 net/netfilter/nf_conntrack_core.c 			hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
bucket           2094 net/netfilter/nf_conntrack_core.c 	unsigned int bucket = 0, sequence;
bucket           2102 net/netfilter/nf_conntrack_core.c 		while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
bucket           2112 net/netfilter/nf_conntrack_core.c 		bucket = 0;
bucket           2331 net/netfilter/nf_conntrack_core.c 	int i, bucket;
bucket           2366 net/netfilter/nf_conntrack_core.c 			bucket = __hash_conntrack(nf_ct_net(ct),
bucket           2368 net/netfilter/nf_conntrack_core.c 			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
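
The nf_conntrack_core.c hits choose a bucket with reciprocal_scale(), which maps a 32-bit hash onto [0, hsize) via a multiply-and-shift rather than a modulo, and early_drop() then probes a few consecutive buckets looking for something to evict. A rough userspace sketch of that selection-plus-linear-probe pattern (scale_to_buckets(), try_drop_one() and NR_PROBES are placeholders, not kernel APIs):

    #include <stdint.h>
    #include <stdbool.h>

    /* Multiply-and-shift scaling, the same idea as the kernel's reciprocal_scale(). */
    static inline uint32_t scale_to_buckets(uint32_t hash, uint32_t hsize)
    {
        return (uint32_t)(((uint64_t)hash * hsize) >> 32);
    }

    /*
     * Try to free space by scanning a handful of consecutive buckets, starting
     * at the one the hash points to.  try_drop_one() stands in for the real
     * per-chain eviction and is assumed to return true when it dropped an entry.
     */
    #define NR_PROBES 8

    static bool early_drop(uint32_t hash, uint32_t hsize,
                           bool (*try_drop_one)(uint32_t bucket))
    {
        uint32_t bucket = scale_to_buckets(hash, hsize);
        unsigned int i;

        for (i = 0; i < NR_PROBES; i++) {
            if (try_drop_one(bucket))
                return true;
            bucket = (bucket + 1) % hsize;   /* probe the next chain */
        }
        return false;
    }
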
bucket            547 net/netfilter/nf_conntrack_expect.c 	unsigned int bucket;
bucket            555 net/netfilter/nf_conntrack_expect.c 	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
bucket            556 net/netfilter/nf_conntrack_expect.c 		n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
bucket            570 net/netfilter/nf_conntrack_expect.c 		if (++st->bucket >= nf_ct_expect_hsize)
bucket            572 net/netfilter/nf_conntrack_expect.c 		head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
bucket            101 net/netfilter/nf_conntrack_standalone.c 	unsigned int bucket;
bucket            110 net/netfilter/nf_conntrack_standalone.c 	for (st->bucket = 0;
bucket            111 net/netfilter/nf_conntrack_standalone.c 	     st->bucket < st->htable_size;
bucket            112 net/netfilter/nf_conntrack_standalone.c 	     st->bucket++) {
bucket            114 net/netfilter/nf_conntrack_standalone.c 			hlist_nulls_first_rcu(&st->hash[st->bucket]));
bucket            128 net/netfilter/nf_conntrack_standalone.c 		if (likely(get_nulls_value(head) == st->bucket)) {
bucket            129 net/netfilter/nf_conntrack_standalone.c 			if (++st->bucket >= st->htable_size)
bucket            133 net/netfilter/nf_conntrack_standalone.c 			hlist_nulls_first_rcu(&st->hash[st->bucket]));
bucket           1013 net/netfilter/nfnetlink_log.c 	unsigned int bucket;
bucket           1024 net/netfilter/nfnetlink_log.c 	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
bucket           1025 net/netfilter/nfnetlink_log.c 		struct hlist_head *head = &log->instance_table[st->bucket];
bucket           1041 net/netfilter/nfnetlink_log.c 		if (++st->bucket >= INSTANCE_BUCKETS)
bucket           1045 net/netfilter/nfnetlink_log.c 		head = &log->instance_table[st->bucket];
bucket           1405 net/netfilter/nfnetlink_queue.c 	unsigned int bucket;
bucket           1419 net/netfilter/nfnetlink_queue.c 	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
bucket           1420 net/netfilter/nfnetlink_queue.c 		if (!hlist_empty(&q->instance_table[st->bucket]))
bucket           1421 net/netfilter/nfnetlink_queue.c 			return q->instance_table[st->bucket].first;
bucket           1435 net/netfilter/nfnetlink_queue.c 		if (++st->bucket >= INSTANCE_BUCKETS)
bucket           1439 net/netfilter/nfnetlink_queue.c 		h = q->instance_table[st->bucket].first;
bucket           1056 net/netfilter/xt_hashlimit.c 	unsigned int *bucket;
bucket           1062 net/netfilter/xt_hashlimit.c 	bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
bucket           1063 net/netfilter/xt_hashlimit.c 	if (!bucket)
bucket           1066 net/netfilter/xt_hashlimit.c 	*bucket = *pos;
bucket           1067 net/netfilter/xt_hashlimit.c 	return bucket;
bucket           1073 net/netfilter/xt_hashlimit.c 	unsigned int *bucket = v;
bucket           1075 net/netfilter/xt_hashlimit.c 	*pos = ++(*bucket);
bucket           1080 net/netfilter/xt_hashlimit.c 	return bucket;
bucket           1087 net/netfilter/xt_hashlimit.c 	unsigned int *bucket = v;
bucket           1089 net/netfilter/xt_hashlimit.c 	if (!IS_ERR(bucket))
bucket           1090 net/netfilter/xt_hashlimit.c 		kfree(bucket);
bucket           1173 net/netfilter/xt_hashlimit.c 	unsigned int *bucket = (unsigned int *)v;
bucket           1176 net/netfilter/xt_hashlimit.c 	if (!hlist_empty(&htable->hash[*bucket])) {
bucket           1177 net/netfilter/xt_hashlimit.c 		hlist_for_each_entry(ent, &htable->hash[*bucket], node)
bucket           1187 net/netfilter/xt_hashlimit.c 	unsigned int *bucket = v;
bucket           1190 net/netfilter/xt_hashlimit.c 	if (!hlist_empty(&htable->hash[*bucket])) {
bucket           1191 net/netfilter/xt_hashlimit.c 		hlist_for_each_entry(ent, &htable->hash[*bucket], node)
bucket           1201 net/netfilter/xt_hashlimit.c 	unsigned int *bucket = v;
bucket           1204 net/netfilter/xt_hashlimit.c 	if (!hlist_empty(&htable->hash[*bucket])) {
bucket           1205 net/netfilter/xt_hashlimit.c 		hlist_for_each_entry(ent, &htable->hash[*bucket], node)
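
xt_hashlimit's /proc iterator keeps nothing but a bucket index as its cursor: ->start() kmallocs a single unsigned int seeded from *pos, ->next() increments it and mirrors it back into *pos, ->stop() frees it, and ->show() walks the chain for that bucket. A stripped-down userspace analogue of the same cursor pattern (plain malloc/free and a toy chain array instead of the hashlimit table):

    #include <stdio.h>
    #include <stdlib.h>

    #define HASH_SIZE 16

    struct ent {
        struct ent *next;
        int value;
    };

    static struct ent *hash_table[HASH_SIZE];   /* toy stand-in for htable->hash[] */

    /* ->start(): allocate the cursor and seed it from the saved position. */
    static unsigned int *iter_start(unsigned long *pos)
    {
        unsigned int *bucket;

        if (*pos >= HASH_SIZE)
            return NULL;
        bucket = malloc(sizeof(*bucket));
        if (!bucket)
            return NULL;
        *bucket = (unsigned int)*pos;
        return bucket;
    }

    /* ->next(): advance to the following bucket and mirror it into *pos. */
    static unsigned int *iter_next(unsigned int *bucket, unsigned long *pos)
    {
        *pos = ++(*bucket);
        if (*bucket >= HASH_SIZE) {
            free(bucket);
            return NULL;
        }
        return bucket;
    }

    /* ->show(): print every entry chained off the current bucket. */
    static void iter_show(const unsigned int *bucket)
    {
        const struct ent *e;

        for (e = hash_table[*bucket]; e; e = e->next)
            printf("bucket %u: %d\n", *bucket, e->value);
    }

    int main(void)
    {
        struct ent e1 = { .next = NULL, .value = 42 };
        unsigned long pos = 0;
        unsigned int *bucket;

        hash_table[3] = &e1;   /* one demo entry in bucket 3 */

        for (bucket = iter_start(&pos); bucket; bucket = iter_next(bucket, &pos))
            iter_show(bucket);
        return 0;
    }
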
bucket            468 net/netfilter/xt_recent.c 	unsigned int		bucket;
bucket            481 net/netfilter/xt_recent.c 	for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)
bucket            482 net/netfilter/xt_recent.c 		list_for_each_entry(e, &t->iphash[st->bucket], list)
bucket            496 net/netfilter/xt_recent.c 	while (head == &t->iphash[st->bucket]) {
bucket            497 net/netfilter/xt_recent.c 		if (++st->bucket >= ip_list_hash_size)
bucket            499 net/netfilter/xt_recent.c 		head = t->iphash[st->bucket].next;
bucket           1400 net/openvswitch/datapath.c 		u32 bucket, obj;
bucket           1402 net/openvswitch/datapath.c 		bucket = cb->args[0];
bucket           1404 net/openvswitch/datapath.c 		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
bucket           1414 net/openvswitch/datapath.c 		cb->args[0] = bucket;
bucket           2268 net/openvswitch/datapath.c 	int bucket = cb->args[0], skip = cb->args[1];
bucket           2277 net/openvswitch/datapath.c 	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
bucket            250 net/openvswitch/flow_table.c 				       u32 *bucket, u32 *last)
bucket            258 net/openvswitch/flow_table.c 	while (*bucket < ti->n_buckets) {
bucket            260 net/openvswitch/flow_table.c 		head = &ti->buckets[*bucket];
bucket            269 net/openvswitch/flow_table.c 		(*bucket)++;
bucket             61 net/openvswitch/flow_table.h 				       u32 *bucket, u32 *idx);
bucket            254 net/openvswitch/meter.c 		band->bucket = (band->burst_size + band->rate) * 1000;
bucket            255 net/openvswitch/meter.c 		band_max_delta_t = band->bucket / band->rate;
bucket            495 net/openvswitch/meter.c 		band->bucket += delta_ms * band->rate;
bucket            496 net/openvswitch/meter.c 		if (band->bucket > max_bucket_size)
bucket            497 net/openvswitch/meter.c 			band->bucket = max_bucket_size;
bucket            499 net/openvswitch/meter.c 		if (band->bucket >= cost) {
bucket            500 net/openvswitch/meter.c 			band->bucket -= cost;
bucket             26 net/openvswitch/meter.h 	u32 bucket; /* 1/1000 packets, or in bits */
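
The openvswitch meter hits are a token bucket kept in 1/1000ths of a packet (or in bits, for byte-based bands): the bucket is refilled by delta_ms * rate on each packet, clamped to a burst-derived maximum, and the packet stays within the band only if the bucket can pay its cost. A self-contained sketch of that accounting, with invented names rather than the kernel's dp_meter_band fields:

    #include <stdint.h>
    #include <stdbool.h>

    struct token_band {
        uint32_t rate;        /* tokens per millisecond */
        uint32_t burst;       /* extra tokens allowed on top of one rate interval */
        uint64_t bucket;      /* tokens currently available */
        uint64_t max_bucket;  /* clamp so idle periods cannot bank unbounded credit */
    };

    static void band_init(struct token_band *b, uint32_t rate, uint32_t burst)
    {
        b->rate = rate;
        b->burst = burst;
        b->max_bucket = ((uint64_t)burst + rate) * 1000;  /* start full */
        b->bucket = b->max_bucket;
    }

    /*
     * Refill for the elapsed time, clamp, then try to pay for one packet.
     * Returns true if the packet fits within the band (i.e. is not policed).
     */
    static bool band_admit(struct token_band *b, uint64_t delta_ms, uint64_t cost)
    {
        b->bucket += delta_ms * b->rate;
        if (b->bucket > b->max_bucket)
            b->bucket = b->max_bucket;

        if (b->bucket >= cost) {
            b->bucket -= cost;
            return true;
        }
        return false;
    }
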
bucket             96 net/openvswitch/vport.c 	struct hlist_head *bucket = hash_bucket(net, name);
bucket             99 net/openvswitch/vport.c 	hlist_for_each_entry_rcu(vport, bucket, hash_node)
bucket            194 net/openvswitch/vport.c 		struct hlist_head *bucket;
bucket            205 net/openvswitch/vport.c 		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
bucket            207 net/openvswitch/vport.c 		hlist_add_head_rcu(&vport->hash_node, bucket);
bucket            256 net/rxrpc/proc.c 	unsigned int bucket, n;
bucket            266 net/rxrpc/proc.c 	bucket = *_pos >> shift;
bucket            268 net/rxrpc/proc.c 		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
bucket            273 net/rxrpc/proc.c 			if (bucket == 0)
bucket            279 net/rxrpc/proc.c 		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
bucket            282 net/rxrpc/proc.c 		bucket++;
bucket            284 net/rxrpc/proc.c 		*_pos = (bucket << shift) | n;
bucket            291 net/rxrpc/proc.c 	unsigned int bucket, n;
bucket            298 net/rxrpc/proc.c 	bucket = *_pos >> shift;
bucket            300 net/rxrpc/proc.c 	p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
bucket            305 net/rxrpc/proc.c 		bucket++;
bucket            307 net/rxrpc/proc.c 		*_pos = (bucket << shift) | n;
bucket            309 net/rxrpc/proc.c 		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
bucket            318 net/rxrpc/proc.c 		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
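
net/rxrpc/proc.c packs two coordinates into the single seq_file position: the high bits select the peer_hash bucket and the low shift bits count entries within that chain, so *_pos = (bucket << shift) | n survives between reads. A tiny sketch of that encode/decode (CHAIN_SHIFT is an assumed stand-in for the kernel's computed shift):

    #include <stdint.h>

    #define CHAIN_SHIFT 10   /* low bits: offset inside the bucket's chain (assumed width) */

    struct cursor {
        unsigned int bucket; /* which hash chain */
        unsigned int n;      /* 1-based position within that chain */
    };

    static uint64_t cursor_pack(struct cursor c)
    {
        return ((uint64_t)c.bucket << CHAIN_SHIFT) | c.n;
    }

    static struct cursor cursor_unpack(uint64_t pos)
    {
        struct cursor c = {
            .bucket = (unsigned int)(pos >> CHAIN_SHIFT),
            .n = (unsigned int)(pos & ((1u << CHAIN_SHIFT) - 1)),
        };
        return c;
    }
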
bucket            329 net/sched/sch_hhf.c static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket)
bucket            331 net/sched/sch_hhf.c 	struct sk_buff *skb = bucket->head;
bucket            333 net/sched/sch_hhf.c 	bucket->head = skb->next;
bucket            339 net/sched/sch_hhf.c static void bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb)
bucket            341 net/sched/sch_hhf.c 	if (bucket->head == NULL)
bucket            342 net/sched/sch_hhf.c 		bucket->head = skb;
bucket            344 net/sched/sch_hhf.c 		bucket->tail->next = skb;
bucket            345 net/sched/sch_hhf.c 	bucket->tail = skb;
bucket            352 net/sched/sch_hhf.c 	struct wdrr_bucket *bucket;
bucket            355 net/sched/sch_hhf.c 	bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
bucket            356 net/sched/sch_hhf.c 	if (!bucket->head)
bucket            357 net/sched/sch_hhf.c 		bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];
bucket            359 net/sched/sch_hhf.c 	if (bucket->head) {
bucket            360 net/sched/sch_hhf.c 		struct sk_buff *skb = dequeue_head(bucket);
bucket            368 net/sched/sch_hhf.c 	return bucket - q->buckets;
bucket            376 net/sched/sch_hhf.c 	struct wdrr_bucket *bucket;
bucket            381 net/sched/sch_hhf.c 	bucket = &q->buckets[idx];
bucket            382 net/sched/sch_hhf.c 	bucket_add(bucket, skb);
bucket            385 net/sched/sch_hhf.c 	if (list_empty(&bucket->bucketchain)) {
bucket            395 net/sched/sch_hhf.c 			list_add_tail(&bucket->bucketchain, &q->old_buckets);
bucket            398 net/sched/sch_hhf.c 			list_add_tail(&bucket->bucketchain, &q->new_buckets);
bucket            400 net/sched/sch_hhf.c 		bucket->deficit = weight * q->quantum;
bucket            422 net/sched/sch_hhf.c 	struct wdrr_bucket *bucket;
bucket            432 net/sched/sch_hhf.c 	bucket = list_first_entry(head, struct wdrr_bucket, bucketchain);
bucket            434 net/sched/sch_hhf.c 	if (bucket->deficit <= 0) {
bucket            435 net/sched/sch_hhf.c 		int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
bucket            438 net/sched/sch_hhf.c 		bucket->deficit += weight * q->quantum;
bucket            439 net/sched/sch_hhf.c 		list_move_tail(&bucket->bucketchain, &q->old_buckets);
bucket            443 net/sched/sch_hhf.c 	if (bucket->head) {
bucket            444 net/sched/sch_hhf.c 		skb = dequeue_head(bucket);
bucket            452 net/sched/sch_hhf.c 			list_move_tail(&bucket->bucketchain, &q->old_buckets);
bucket            454 net/sched/sch_hhf.c 			list_del_init(&bucket->bucketchain);
bucket            458 net/sched/sch_hhf.c 	bucket->deficit -= qdisc_pkt_len(skb);
bucket            645 net/sched/sch_hhf.c 			struct wdrr_bucket *bucket = q->buckets + i;
bucket            647 net/sched/sch_hhf.c 			INIT_LIST_HEAD(&bucket->bucketchain);
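
The sch_hhf hits implement a two-bucket weighted deficit round robin: heavy-hitter flows land in one bucket, everything else in the other, and a bucket may send up to weight * quantum bytes per round before its deficit is recharged and it rotates to the back. A compact sketch of the deficit accounting using byte counts instead of sk_buff queues (wdrr_dequeue() and its parameters are illustrative, not the qdisc's API):

    #include <stdbool.h>

    enum { BUCKET_HH, BUCKET_NON_HH, NBUCKETS };

    struct wdrr_bucket {
        int deficit;   /* bytes this bucket may still send in the current round */
        int backlog;   /* bytes queued (stand-in for the skb list) */
    };

    /*
     * One dequeue attempt: a bucket whose deficit is used up gets a fresh
     * weight * quantum allowance and is skipped for this pass (it rotates to
     * the back, as in sch_hhf); otherwise a packet of pkt_len bytes is charged
     * against it.  Returns true if a packet was "sent", with its bucket in *which.
     */
    static bool wdrr_dequeue(struct wdrr_bucket *b, int nbuckets, int quantum,
                             const int *weight, int pkt_len, int *which)
    {
        int i;

        for (i = 0; i < nbuckets; i++) {
            struct wdrr_bucket *cur = &b[i];

            if (cur->deficit <= 0) {
                cur->deficit += weight[i] * quantum;  /* recharge, serve on a later pass */
                continue;
            }
            if (cur->backlog >= pkt_len) {
                cur->backlog -= pkt_len;
                cur->deficit -= pkt_len;
                *which = i;
                return true;
            }
        }
        return false;
    }
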
bucket           2708 net/unix/af_unix.c 	unsigned long bucket = get_bucket(*pos);
bucket           2712 net/unix/af_unix.c 	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
bucket           2726 net/unix/af_unix.c 	unsigned long bucket;
bucket           2742 net/unix/af_unix.c 		bucket = get_bucket(*pos) + 1;
bucket           2743 net/unix/af_unix.c 		*pos = set_bucket_offset(bucket, 1);
bucket           2744 net/unix/af_unix.c 	} while (bucket < ARRAY_SIZE(unix_socket_table));
bucket             52 net/vmw_vsock/diag.c 	unsigned int bucket;
bucket             63 net/vmw_vsock/diag.c 	bucket = cb->args[1];
bucket             72 net/vmw_vsock/diag.c 		while (bucket < ARRAY_SIZE(vsock_bind_table)) {
bucket             73 net/vmw_vsock/diag.c 			struct list_head *head = &vsock_bind_table[bucket];
bucket             94 net/vmw_vsock/diag.c 			bucket++;
bucket             98 net/vmw_vsock/diag.c 		bucket = 0;
bucket            102 net/vmw_vsock/diag.c 	while (bucket < ARRAY_SIZE(vsock_connected_table)) {
bucket            103 net/vmw_vsock/diag.c 		struct list_head *head = &vsock_connected_table[bucket];
bucket            128 net/vmw_vsock/diag.c 		bucket++;
bucket            135 net/vmw_vsock/diag.c 	cb->args[1] = bucket;
bucket             64 security/safesetid/securityfs.c 	int bucket;
bucket             68 security/safesetid/securityfs.c 	hash_for_each_safe(pol->rules, bucket, tmp, rule, next)
bucket             86 security/safesetid/securityfs.c 	int bucket;
bucket             90 security/safesetid/securityfs.c 	hash_for_each(pol->rules, bucket, rule, next) {
bucket             72 tools/testing/selftests/vDSO/parse_vdso.c 	ELF(Word) *bucket, *chain;
bucket            177 tools/testing/selftests/vDSO/parse_vdso.c 	vdso_info.bucket = &hash[2];
bucket            229 tools/testing/selftests/vDSO/parse_vdso.c 	ELF(Word) chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket];
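
The parse_vdso.c hits use the classic SysV ELF hash section, whose layout is [nbucket][nchain][bucket[nbucket]][chain[nchain]]: a lookup starts at bucket[elf_hash(name) % nbucket] and follows chain[] until STN_UNDEF. A standalone sketch of that walk over a raw hash section (the symbol-name comparison is delegated to a caller-supplied matcher):

    #include <stdint.h>
    #include <stdbool.h>

    #define STN_UNDEF 0

    /* The well-known SysV ELF hash over a symbol name. */
    static uint32_t elf_hash(const char *name)
    {
        uint32_t h = 0, g;

        while (*name) {
            h = (h << 4) + (unsigned char)*name++;
            g = h & 0xf0000000;
            if (g)
                h ^= g >> 24;
            h &= ~g;
        }
        return h;
    }

    /*
     * hash points at the DT_HASH section: [nbucket][nchain][bucket...][chain...].
     * matches(sym_idx) is a caller-supplied check that the symbol table entry at
     * sym_idx has the wanted name and binding.  Returns the symbol index or 0.
     */
    static uint32_t elf_hash_lookup(const uint32_t *hash, const char *name,
                                    bool (*matches)(uint32_t sym_idx))
    {
        uint32_t nbucket = hash[0];
        const uint32_t *bucket = &hash[2];
        const uint32_t *chain = &bucket[nbucket];
        uint32_t i;

        for (i = bucket[elf_hash(name) % nbucket]; i != STN_UNDEF; i = chain[i])
            if (matches(i))
                return i;
        return STN_UNDEF;
    }
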