buckets            85 block/blk-stat.c 	for (bucket = 0; bucket < cb->buckets; bucket++)
buckets            92 block/blk-stat.c 		for (bucket = 0; bucket < cb->buckets; bucket++) {
buckets           104 block/blk-stat.c 			unsigned int buckets, void *data)
buckets           112 block/blk-stat.c 	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
buckets           118 block/blk-stat.c 	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
buckets           129 block/blk-stat.c 	cb->buckets = buckets;
buckets           145 block/blk-stat.c 		for (bucket = 0; bucket < cb->buckets; bucket++)
buckets            45 block/blk-stat.h 	unsigned int buckets;
buckets            87 block/blk-stat.h 			unsigned int buckets, void *data);
buckets           134 block/kyber-iosched.c 	atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
buckets           213 block/kyber-iosched.c 	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
buckets           214 block/kyber-iosched.c 	atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
buckets           218 block/kyber-iosched.c 		buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
buckets           229 block/kyber-iosched.c 	unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
buckets           233 block/kyber-iosched.c 		samples += buckets[bucket];
buckets           252 block/kyber-iosched.c 		if (buckets[bucket] >= percentile_samples)
buckets           254 block/kyber-iosched.c 		percentile_samples -= buckets[bucket];
buckets           256 block/kyber-iosched.c 	memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
buckets           632 block/kyber-iosched.c 	atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
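
The kyber-iosched.c hits above trace a two-level histogram: per-CPU atomic counters are drained into a global latency_buckets[] array (the atomic_xchg line), which is then walked to find the bucket holding a target percentile. A minimal userspace sketch of that walk follows; the names and the rounding are illustrative, not the kernel code.

/* Hedged sketch: locate the histogram bucket containing the p-th
 * percentile, mirroring the walk the kyber-iosched.c hits perform
 * over latency_buckets[]. Simplified rounding; illustrative names. */
#include <stdio.h>

static int percentile_bucket(const unsigned int *buckets, int nr_buckets,
                             unsigned int p)
{
        unsigned int samples = 0, percentile_samples;
        int bucket;

        for (bucket = 0; bucket < nr_buckets; bucket++)
                samples += buckets[bucket];
        if (!samples)
                return -1;                      /* no data this window */

        /* How many samples must lie at or below the answer. */
        percentile_samples = (samples * p + 99) / 100;

        for (bucket = 0; bucket < nr_buckets - 1; bucket++) {
                if (buckets[bucket] >= percentile_samples)
                        break;
                percentile_samples -= buckets[bucket];
        }
        return bucket;
}

int main(void)
{
        unsigned int hist[8] = { 5, 20, 40, 20, 10, 3, 1, 1 };

        printf("p90 falls in bucket %d\n", percentile_bucket(hist, 8, 90));
        return 0;
}

Kyber then zeroes the bucket array after each calculation (the memset hit above), so every sampling window starts fresh.
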
buckets           273 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 	struct hlist_head *buckets; /** ht for relocation handles */
buckets           333 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
buckets           335 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			if (eb->buckets)
buckets           509 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 			       &eb->buckets[hash_32(entry->handle,
buckets           841 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		head = &eb->buckets[hash_32(handle, eb->lut_size)];
buckets           878 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		memset(eb->buckets, 0,
buckets           887 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 		kfree(eb->buckets);
buckets            84 drivers/gpu/drm/radeon/radeon_cs.c 	struct radeon_cs_buckets buckets;
buckets           102 drivers/gpu/drm/radeon/radeon_cs.c 	radeon_cs_buckets_init(&buckets);
buckets           189 drivers/gpu/drm/radeon/radeon_cs.c 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
buckets           193 drivers/gpu/drm/radeon/radeon_cs.c 	radeon_cs_buckets_get_list(&buckets, &p->validated);
buckets           298 drivers/infiniband/ulp/ipoib/ipoib.h 	struct ipoib_neigh __rcu      **buckets;
buckets          1279 drivers/infiniband/ulp/ipoib/ipoib_main.c 	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
buckets          1325 drivers/infiniband/ulp/ipoib/ipoib_main.c 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
buckets          1404 drivers/infiniband/ulp/ipoib/ipoib_main.c 	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
buckets          1430 drivers/infiniband/ulp/ipoib/ipoib_main.c 			   rcu_dereference_protected(htbl->buckets[hash_val],
buckets          1432 drivers/infiniband/ulp/ipoib/ipoib_main.c 	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
buckets          1489 drivers/infiniband/ulp/ipoib/ipoib_main.c 	np = &htbl->buckets[hash_val];
buckets          1514 drivers/infiniband/ulp/ipoib/ipoib_main.c 	struct ipoib_neigh __rcu **buckets;
buckets          1523 drivers/infiniband/ulp/ipoib/ipoib_main.c 	buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
buckets          1524 drivers/infiniband/ulp/ipoib/ipoib_main.c 	if (!buckets) {
buckets          1530 drivers/infiniband/ulp/ipoib/ipoib_main.c 	htbl->buckets = buckets;
buckets          1547 drivers/infiniband/ulp/ipoib/ipoib_main.c 	struct ipoib_neigh __rcu **buckets = htbl->buckets;
buckets          1550 drivers/infiniband/ulp/ipoib/ipoib_main.c 	kvfree(buckets);
buckets          1574 drivers/infiniband/ulp/ipoib/ipoib_main.c 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
buckets          1619 drivers/infiniband/ulp/ipoib/ipoib_main.c 		struct ipoib_neigh __rcu **np = &htbl->buckets[i];
buckets           398 drivers/lightnvm/pblk-sysfs.c 	int buckets = pblk->min_write_pgs - 1;
buckets           403 drivers/lightnvm/pblk-sysfs.c 		for (i = 0; i < (buckets + 1); i++)
buckets           411 drivers/lightnvm/pblk-sysfs.c 	for (i = 0; i < buckets; i++)
buckets           417 drivers/lightnvm/pblk-sysfs.c 	for (i = 0; i < buckets; i++) {
buckets           528 drivers/lightnvm/pblk-sysfs.c 	int buckets = pblk->min_write_pgs - 1;
buckets           541 drivers/lightnvm/pblk-sysfs.c 	for (i = 0; i < buckets; i++)
buckets           147 drivers/md/bcache/alloc.c 		trace_bcache_invalidate(ca, b - ca->buckets);
buckets           158 drivers/md/bcache/alloc.c 	fifo_push(&ca->free_inc, b - ca->buckets);
buckets           227 drivers/md/bcache/alloc.c 		b = ca->buckets + ca->fifo_last_bucket++;
buckets           253 drivers/md/bcache/alloc.c 		b = ca->buckets + n;
buckets           446 drivers/md/bcache/alloc.c 	b = ca->buckets + r;
buckets           513 drivers/md/bcache/alloc.c 		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
buckets           445 drivers/md/bcache/bcache.h 	struct bucket		*buckets;
buckets           800 drivers/md/bcache/bcache.h 	return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
buckets           871 drivers/md/bcache/bcache.h 	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
buckets           872 drivers/md/bcache/bcache.h 	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
buckets          1806 drivers/md/bcache/btree.c 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
buckets          1810 drivers/md/bcache/btree.c 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
buckets          1991 drivers/md/bcache/btree.c 				   b - ca->buckets))
buckets          1993 drivers/md/bcache/btree.c 						  b - ca->buckets);
buckets           570 drivers/md/bcache/super.c 		for (b = ca->buckets + i * prios_per_bucket(ca);
buckets           571 drivers/md/bcache/super.c 		     b < ca->buckets + ca->sb.nbuckets && d < end;
buckets           589 drivers/md/bcache/super.c 		atomic_dec_bug(&ca->buckets[bucket].pin);
buckets           606 drivers/md/bcache/super.c 				&ca->buckets[ca->prio_last_buckets[i]]);
buckets           620 drivers/md/bcache/super.c 	for (b = ca->buckets;
buckets           621 drivers/md/bcache/super.c 	     b < ca->buckets + ca->sb.nbuckets;
buckets          2131 drivers/md/bcache/super.c 	vfree(ca->buckets);
buckets          2211 drivers/md/bcache/super.c 	ca->buckets = vzalloc(array_size(sizeof(struct bucket),
buckets          2213 drivers/md/bcache/super.c 	if (!ca->buckets) {
buckets          2241 drivers/md/bcache/super.c 	vfree(ca->buckets);
buckets          1043 drivers/md/bcache/sysfs.c 			p[i] = ca->buckets[i].prio;
buckets           576 drivers/md/dm-cache-policy-smq.c 	unsigned *buckets;
buckets           591 drivers/md/dm-cache-policy-smq.c 	ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
buckets           592 drivers/md/dm-cache-policy-smq.c 	if (!ht->buckets)
buckets           596 drivers/md/dm-cache-policy-smq.c 		ht->buckets[i] = INDEXER_NULL;
buckets           603 drivers/md/dm-cache-policy-smq.c 	vfree(ht->buckets);
buckets           608 drivers/md/dm-cache-policy-smq.c 	return to_entry(ht->es, ht->buckets[bucket]);
buckets           618 drivers/md/dm-cache-policy-smq.c 	e->hash_next = ht->buckets[bucket];
buckets           619 drivers/md/dm-cache-policy-smq.c 	ht->buckets[bucket] = to_index(ht->es, e);
buckets           650 drivers/md/dm-cache-policy-smq.c 		ht->buckets[h] = e->hash_next;
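
The dm-cache-policy-smq hits use a different bucket representation: buckets[] holds entry indices into a preallocated entry array rather than pointers, with INDEXER_NULL as the empty-chain sentinel and to_entry()/to_index() converting between the two views. A hedged sketch of the same index-chained layout, with illustrative names and sizes:

/* Hedged sketch: buckets store indices into a flat entry pool, not
 * pointers; a sentinel value marks an empty chain. Illustrative
 * names, not the dm-cache-policy-smq code. */
#include <stdio.h>

#define NIL 0xffffu                  /* stands in for INDEXER_NULL */

struct ientry { unsigned short hash_next; int key; };

static struct ientry pool[8];        /* preallocated entry array */
static unsigned short buckets[4];

static void ht_init(void)
{
        for (int i = 0; i < 4; i++)
                buckets[i] = NIL;
}

static void ht_insert(unsigned short idx, int key)
{
        unsigned b = (unsigned)key % 4;

        pool[idx].key = key;
        pool[idx].hash_next = buckets[b];   /* push onto chain head */
        buckets[b] = idx;
}

int main(void)
{
        ht_init();
        ht_insert(0, 42);
        ht_insert(1, 46);    /* collides with 42 (both % 4 == 2) */

        for (unsigned i = buckets[42 % 4]; i != NIL; i = pool[i].hash_next)
                printf("key %d at index %u\n", pool[i].key, i);
        return 0;
}

Storing 16-bit indices instead of 64-bit pointers roughly quarters the table's footprint, which is presumably why smq chains by index.
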
buckets            73 drivers/md/dm-ioctl.c static void init_buckets(struct list_head *buckets)
buckets            78 drivers/md/dm-ioctl.c 		INIT_LIST_HEAD(buckets + i);
buckets            70 drivers/md/dm-region-hash.c 	struct list_head *buckets;
buckets           206 drivers/md/dm-region-hash.c 	rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
buckets           207 drivers/md/dm-region-hash.c 	if (!rh->buckets) {
buckets           214 drivers/md/dm-region-hash.c 		INIT_LIST_HEAD(rh->buckets + i);
buckets           228 drivers/md/dm-region-hash.c 		vfree(rh->buckets);
buckets           244 drivers/md/dm-region-hash.c 		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
buckets           255 drivers/md/dm-region-hash.c 	vfree(rh->buckets);
buckets           274 drivers/md/dm-region-hash.c 	struct list_head *bucket = rh->buckets + rh_hash(rh, region);
buckets           285 drivers/md/dm-region-hash.c 	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
buckets            96 drivers/md/persistent-data/dm-transaction-manager.c 	struct hlist_head buckets[DM_HASH_SIZE];
buckets           110 drivers/md/persistent-data/dm-transaction-manager.c 	hlist_for_each_entry(si, tm->buckets + bucket, hlist)
buckets           134 drivers/md/persistent-data/dm-transaction-manager.c 		hlist_add_head(&si->hlist, tm->buckets + bucket);
buckets           148 drivers/md/persistent-data/dm-transaction-manager.c 		bucket = tm->buckets + i;
buckets           177 drivers/md/persistent-data/dm-transaction-manager.c 		INIT_HLIST_HEAD(tm->buckets + i);
buckets          2138 drivers/media/v4l2-core/v4l2-ctrls.c 	hdl->buckets = kvmalloc_array(hdl->nr_of_buckets,
buckets          2139 drivers/media/v4l2-core/v4l2-ctrls.c 				      sizeof(hdl->buckets[0]),
buckets          2141 drivers/media/v4l2-core/v4l2-ctrls.c 	hdl->error = hdl->buckets ? 0 : -ENOMEM;
buckets          2154 drivers/media/v4l2-core/v4l2-ctrls.c 	if (hdl == NULL || hdl->buckets == NULL)
buckets          2178 drivers/media/v4l2-core/v4l2-ctrls.c 	kvfree(hdl->buckets);
buckets          2179 drivers/media/v4l2-core/v4l2-ctrls.c 	hdl->buckets = NULL;
buckets          2232 drivers/media/v4l2-core/v4l2-ctrls.c 	ref = hdl->buckets ? hdl->buckets[bucket] : NULL;
buckets          2330 drivers/media/v4l2-core/v4l2-ctrls.c 	new_ref->next = hdl->buckets[bucket];
buckets          2331 drivers/media/v4l2-core/v4l2-ctrls.c 	hdl->buckets[bucket] = new_ref;
buckets          1149 drivers/message/fusion/mptlan.c 	u32 curr, buckets, count, max;
buckets          1155 drivers/message/fusion/mptlan.c 	buckets = (priv->max_buckets_out - curr);
buckets          1159 drivers/message/fusion/mptlan.c 			__func__, buckets, curr));
buckets          1164 drivers/message/fusion/mptlan.c 	while (buckets) {
buckets          1170 drivers/message/fusion/mptlan.c 				 __func__, buckets));
buckets          1177 drivers/message/fusion/mptlan.c 		count = buckets;
buckets          1274 drivers/message/fusion/mptlan.c 		buckets -= i;
buckets          1280 drivers/message/fusion/mptlan.c 		  __func__, buckets, atomic_read(&priv->buckets_out)));
buckets            45 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP];
buckets            71 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets,
buckets            74 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)];
buckets            99 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	hlist_add_head(&new_node->hlist, &ht->buckets[key]);
buckets           112 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn);
buckets           131 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c 	node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn);
buckets            43 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	struct mlx5dr_icm_bucket *buckets;
buckets           338 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		hot_size += pool->buckets[chunk_order].hot_list_count *
buckets           379 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 				       bool buckets[DR_CHUNK_SIZE_MAX])
buckets           385 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket = &pool->buckets[i];
buckets           396 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			buckets[i] = true;
buckets           403 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 				     bool buckets[DR_CHUNK_SIZE_MAX])
buckets           409 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket = &pool->buckets[i];
buckets           415 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		if (!buckets[i])
buckets           425 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 				       bool buckets[DR_CHUNK_SIZE_MAX])
buckets           431 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		bucket = &pool->buckets[i];
buckets           437 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		if (!buckets[i])
buckets           453 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bool buckets[DR_CHUNK_SIZE_MAX] = {};
buckets           460 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	bucket = &pool->buckets[chunk_size];
buckets           467 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chill_buckets_start(pool, bucket, buckets);
buckets           470 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 				dr_icm_chill_buckets_abort(pool, bucket, buckets);
buckets           475 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 			dr_icm_chill_buckets_end(pool, bucket, buckets);
buckets           532 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	pool->buckets = kcalloc(max_log_chunk_sz + 1,
buckets           533 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 				sizeof(pool->buckets[0]),
buckets           535 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	if (!pool->buckets)
buckets           545 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		dr_icm_bucket_init(pool, &pool->buckets[i], i);
buckets           567 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 		dr_icm_bucket_cleanup(&pool->buckets[i]);
buckets           569 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c 	kfree(pool->buckets);
buckets           294 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 				   struct brcmf_gscan_bucket_config **buckets,
buckets           319 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	*buckets = NULL;
buckets           351 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	*buckets = fw_buckets;
buckets           392 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	struct brcmf_gscan_bucket_config *buckets;
buckets           399 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
buckets           404 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);
buckets           433 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	memcpy(&gscan_cfg->bucket[0], buckets,
buckets           434 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	       n_buckets * sizeof(*buckets));
buckets           459 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c 	kfree(buckets);
buckets            24 drivers/s390/scsi/zfcp_reqlist.h 	struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];
buckets            50 drivers/s390/scsi/zfcp_reqlist.h 		INIT_LIST_HEAD(&rl->buckets[i]);
buckets            66 drivers/s390/scsi/zfcp_reqlist.h 		if (!list_empty(&rl->buckets[i]))
buckets            90 drivers/s390/scsi/zfcp_reqlist.h 	list_for_each_entry(req, &rl->buckets[i], list)
buckets           163 drivers/s390/scsi/zfcp_reqlist.h 	list_add_tail(&req->list, &rl->buckets[i]);
buckets           180 drivers/s390/scsi/zfcp_reqlist.h 		list_splice_init(&rl->buckets[i], list);
buckets           207 drivers/s390/scsi/zfcp_reqlist.h 		list_for_each_entry(req, &rl->buckets[i], list)
buckets           176 fs/nfs/direct.c 			verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
buckets           756 fs/nfs/filelayout/filelayout.c 		kfree(flo->commit_info.buckets);
buckets           757 fs/nfs/filelayout/filelayout.c 		flo->commit_info.buckets = NULL;
buckets           768 fs/nfs/filelayout/filelayout.c 	struct pnfs_commit_bucket *buckets;
buckets           787 fs/nfs/filelayout/filelayout.c 	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
buckets           789 fs/nfs/filelayout/filelayout.c 	if (!buckets)
buckets           792 fs/nfs/filelayout/filelayout.c 		INIT_LIST_HEAD(&buckets[i].written);
buckets           793 fs/nfs/filelayout/filelayout.c 		INIT_LIST_HEAD(&buckets[i].committing);
buckets           795 fs/nfs/filelayout/filelayout.c 		buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
buckets           802 fs/nfs/filelayout/filelayout.c 		list_splice(&cinfo->ds->buckets[i].written,
buckets           803 fs/nfs/filelayout/filelayout.c 			    &buckets[i].written);
buckets           804 fs/nfs/filelayout/filelayout.c 		list_splice(&cinfo->ds->buckets[i].committing,
buckets           805 fs/nfs/filelayout/filelayout.c 			    &buckets[i].committing);
buckets           806 fs/nfs/filelayout/filelayout.c 		buckets[i].direct_verf.committed =
buckets           807 fs/nfs/filelayout/filelayout.c 			cinfo->ds->buckets[i].direct_verf.committed;
buckets           808 fs/nfs/filelayout/filelayout.c 		buckets[i].wlseg = cinfo->ds->buckets[i].wlseg;
buckets           809 fs/nfs/filelayout/filelayout.c 		buckets[i].clseg = cinfo->ds->buckets[i].clseg;
buckets           811 fs/nfs/filelayout/filelayout.c 	swap(cinfo->ds->buckets, buckets);
buckets           815 fs/nfs/filelayout/filelayout.c 	kfree(buckets);
buckets          1097 fs/nfs/filelayout/filelayout.c 	for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
buckets           590 fs/nfs/flexfilelayout/flexfilelayout.c 			kfree(ffl->commit_info.buckets);
buckets           591 fs/nfs/flexfilelayout/flexfilelayout.c 			ffl->commit_info.buckets = NULL;
buckets           755 fs/nfs/flexfilelayout/flexfilelayout.c 	struct pnfs_commit_bucket *buckets;
buckets           769 fs/nfs/flexfilelayout/flexfilelayout.c 	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
buckets           771 fs/nfs/flexfilelayout/flexfilelayout.c 	if (!buckets)
buckets           778 fs/nfs/flexfilelayout/flexfilelayout.c 			kfree(buckets);
buckets           780 fs/nfs/flexfilelayout/flexfilelayout.c 			cinfo->ds->buckets = buckets;
buckets           783 fs/nfs/flexfilelayout/flexfilelayout.c 				INIT_LIST_HEAD(&buckets[i].written);
buckets           784 fs/nfs/flexfilelayout/flexfilelayout.c 				INIT_LIST_HEAD(&buckets[i].committing);
buckets           786 fs/nfs/flexfilelayout/flexfilelayout.c 				buckets[i].direct_verf.committed =
buckets           524 fs/nfs/internal.h 		cinfo->buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
buckets           124 fs/nfs/pnfs_nfs.c 		cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
buckets           144 fs/nfs/pnfs_nfs.c 	for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
buckets           170 fs/nfs/pnfs_nfs.c 		bucket = &fl_cinfo->buckets[i];
buckets           197 fs/nfs/pnfs_nfs.c 	bucket = fl_cinfo->buckets;
buckets           222 fs/nfs/pnfs_nfs.c 	bucket = &cinfo->ds->buckets[data->ds_commit_index];
buckets           933 fs/nfs/pnfs_nfs.c 	struct pnfs_commit_bucket *buckets;
buckets           936 fs/nfs/pnfs_nfs.c 	buckets = cinfo->ds->buckets;
buckets           937 fs/nfs/pnfs_nfs.c 	list = &buckets[ds_commit_idx].written;
buckets           950 fs/nfs/pnfs_nfs.c 		WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
buckets           951 fs/nfs/pnfs_nfs.c 		buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
buckets           240 include/linux/crush/crush.h 	struct crush_bucket **buckets;
buckets          2972 include/linux/netdevice.h 	u8			buckets[];
buckets          1277 include/linux/nfs_xdr.h 	struct pnfs_commit_bucket *buckets;
buckets          1393 include/linux/nfs_xdr.h 	kfree(cinfo->buckets);
buckets            87 include/linux/rhashtable.h 	struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
buckets           291 include/linux/rhashtable.h 				     &tbl->buckets[hash];
buckets           298 include/linux/rhashtable.h 				     &tbl->buckets[hash];
buckets           305 include/linux/rhashtable.h 				     &tbl->buckets[hash];
buckets           337 include/media/v4l2-ctrls.h 	struct v4l2_ctrl_ref **buckets;
buckets           440 include/trace/events/bcache.h 		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
buckets            27 kernel/bpf/hashtab.c 	struct bucket *buckets;
buckets           361 kernel/bpf/hashtab.c 	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
buckets           364 kernel/bpf/hashtab.c 	if (!htab->buckets)
buckets           373 kernel/bpf/hashtab.c 		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
buckets           374 kernel/bpf/hashtab.c 		raw_spin_lock_init(&htab->buckets[i].lock);
buckets           397 kernel/bpf/hashtab.c 	bpf_map_area_free(htab->buckets);
buckets           412 kernel/bpf/hashtab.c 	return &htab->buckets[hash & (htab->n_buckets - 1)];
buckets          1210 kernel/bpf/hashtab.c 	bpf_map_area_free(htab->buckets);
buckets            30 kernel/bpf/stackmap.c 	struct stack_map_bucket *buckets[];
buckets           387 kernel/bpf/stackmap.c 	bucket = READ_ONCE(smap->buckets[id]);
buckets           431 kernel/bpf/stackmap.c 	old_bucket = xchg(&smap->buckets[id], new_bucket);
buckets           529 kernel/bpf/stackmap.c 	bucket = xchg(&smap->buckets[id], NULL);
buckets           537 kernel/bpf/stackmap.c 	old_bucket = xchg(&smap->buckets[id], bucket);
buckets           556 kernel/bpf/stackmap.c 		if (id >= smap->n_buckets || !smap->buckets[id])
buckets           562 kernel/bpf/stackmap.c 	while (id < smap->n_buckets && !smap->buckets[id])
buckets           588 kernel/bpf/stackmap.c 	old_bucket = xchg(&smap->buckets[id], NULL);
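
The stackmap hits swap entire bucket pointers with xchg(), so a reader never observes a half-updated slot and the writer gets back the displaced bucket to reclaim. The same idea in portable C11 atomics; this is a single-threaded demo, so it frees the old bucket directly rather than deferring reclamation the way a concurrent kernel path must.

/* Hedged sketch: atomically publish a new bucket and take ownership
 * of the old one, as the xchg(&smap->buckets[id], ...) lines above
 * do. Illustrative names, demo-only reclamation. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct bucket { int depth; };

static _Atomic(struct bucket *) slots[16];

static void slot_update(unsigned id, struct bucket *new)
{
        struct bucket *old = atomic_exchange(&slots[id], new);

        free(old);   /* safe here only because the demo is single-threaded */
}

int main(void)
{
        struct bucket *b = malloc(sizeof(*b));

        b->depth = 3;
        slot_update(5, b);
        printf("slot 5 depth = %d\n", atomic_load(&slots[5])->depth);
        slot_update(5, NULL);        /* retire the slot */
        return 0;
}
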
buckets          1043 kernel/trace/ftrace.c 	.buckets = (struct hlist_head *)empty_buckets,
buckets          1133 kernel/trace/ftrace.c 	hhd = &hash->buckets[key];
buckets          1168 kernel/trace/ftrace.c 	hhd = &hash->buckets[key];
buckets          1216 kernel/trace/ftrace.c 		hhd = &hash->buckets[i];
buckets          1250 kernel/trace/ftrace.c 	kfree(hash->buckets);
buckets          1286 kernel/trace/ftrace.c 	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
buckets          1288 kernel/trace/ftrace.c 	if (!hash->buckets) {
buckets          1349 kernel/trace/ftrace.c 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
buckets          1408 kernel/trace/ftrace.c 		hhd = &src->buckets[i];
buckets          3120 kernel/trace/ftrace.c 	hhd = &hash->buckets[iter->pidx];
buckets          4265 kernel/trace/ftrace.c 			hhd = &mapper->hash.buckets[i];
buckets          4381 kernel/trace/ftrace.c 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
buckets          4443 kernel/trace/ftrace.c 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
buckets          4522 kernel/trace/ftrace.c 		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
buckets          5146 kernel/trace/ftrace.c 		head = &fgd->hash->buckets[i];
buckets           880 kernel/trace/trace.h 	struct hlist_head	*buckets;
buckets            59 lib/rhashtable.c 	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
buckets            92 lib/rhashtable.c 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
buckets           149 lib/rhashtable.c 	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
buckets           155 lib/rhashtable.c 	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
buckets           175 lib/rhashtable.c 	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
buckets           197 lib/rhashtable.c 		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
buckets           247 lib/rhashtable.c 	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
buckets           249 lib/rhashtable.c 	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
buckets           253 lib/rhashtable.c 	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
buckets          1176 lib/rhashtable.c 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
buckets          1216 lib/rhashtable.c 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
buckets           500 lib/test_rhashtable.c 		pos = rht_ptr_exclusive(tbl->buckets + i);
buckets           112 net/ceph/crush/crush.c 	if (map->buckets) {
buckets           115 net/ceph/crush/crush.c 			if (map->buckets[b] == NULL)
buckets           117 net/ceph/crush/crush.c 			crush_destroy_bucket(map->buckets[b]);
buckets           119 net/ceph/crush/crush.c 		kfree(map->buckets);
buckets           527 net/ceph/crush/mapper.c 					itemtype = map->buckets[-1-item]->type;
buckets           540 net/ceph/crush/mapper.c 					in = map->buckets[-1-item];
buckets           564 net/ceph/crush/mapper.c 							    map->buckets[-1-item],
buckets           741 net/ceph/crush/mapper.c 					itemtype = map->buckets[-1-item]->type;
buckets           758 net/ceph/crush/mapper.c 					in = map->buckets[-1-item];
buckets           778 net/ceph/crush/mapper.c 							map->buckets[-1-item],
buckets           865 net/ceph/crush/mapper.c 		if (!map->buckets[b])
buckets           869 net/ceph/crush/mapper.c 		switch (map->buckets[b]->alg) {
buckets           877 net/ceph/crush/mapper.c 		v += map->buckets[b]->size * sizeof(__u32);
buckets           949 net/ceph/crush/mapper.c 			     map->buckets[-1-curstep->arg1])) {
buckets          1033 net/ceph/crush/mapper.c 						map->buckets[bno],
buckets          1055 net/ceph/crush/mapper.c 						map->buckets[bno],
buckets           300 net/ceph/osdmap.c 			    arg->ids_size != c->buckets[bucket_index]->size)
buckets           325 net/ceph/osdmap.c 		if (!c->buckets[b])
buckets           328 net/ceph/osdmap.c 		switch (c->buckets[b]->alg) {
buckets           338 net/ceph/osdmap.c 		c->working_size += c->buckets[b]->size * sizeof(__u32);
buckets           376 net/ceph/osdmap.c 	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
buckets           377 net/ceph/osdmap.c 	if (c->buckets == NULL)
buckets           391 net/ceph/osdmap.c 			c->buckets[i] = NULL;
buckets           417 net/ceph/osdmap.c 		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
buckets            49 net/core/bpf_sk_storage.c 	struct bucket *buckets;
buckets            96 net/core/bpf_sk_storage.c 	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
buckets           578 net/core/bpf_sk_storage.c 		b = &smap->buckets[i];
buckets           605 net/core/bpf_sk_storage.c 	kvfree(smap->buckets);
buckets           650 net/core/bpf_sk_storage.c 	cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
buckets           658 net/core/bpf_sk_storage.c 	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
buckets           660 net/core/bpf_sk_storage.c 	if (!smap->buckets) {
buckets           667 net/core/bpf_sk_storage.c 		INIT_HLIST_HEAD(&smap->buckets[i].list);
buckets           668 net/core/bpf_sk_storage.c 		raw_spin_lock_init(&smap->buckets[i].lock);
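
The bpf_sk_storage.c hits show the common allocate-then-init shape: one allocation for the whole bucket array, then a loop giving every bucket its own chain head and its own lock, so writers touching different buckets never contend. A minimal sketch with pthreads standing in for the kernel's hlist and raw spinlock primitives; all names are illustrative.

/* Hedged sketch: per-bucket locking, mirroring the INIT_HLIST_HEAD +
 * raw_spin_lock_init loop in the lines above. Not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { struct entry *next; unsigned long key; };

struct hbucket {
        struct entry *head;         /* chain of entries hashing here */
        pthread_mutex_t lock;       /* per-bucket lock, not one global lock */
};

static struct hbucket *buckets_alloc(unsigned int nbuckets)
{
        struct hbucket *b = calloc(nbuckets, sizeof(*b));

        if (!b)
                return NULL;
        for (unsigned int i = 0; i < nbuckets; i++)
                pthread_mutex_init(&b[i].lock, NULL);   /* head already NULL */
        return b;
}

int main(void)
{
        struct hbucket *tbl = buckets_alloc(16);

        if (!tbl)
                return 1;
        pthread_mutex_lock(&tbl[3].lock);
        /* ... insert into tbl[3].head while only bucket 3 is held ... */
        pthread_mutex_unlock(&tbl[3].lock);
        free(tbl);
        printf("16 buckets initialised\n");
        return 0;
}
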
buckets          4140 net/core/dev.c 		if (likely(fl->buckets[old_flow]))
buckets          4141 net/core/dev.c 			fl->buckets[old_flow]--;
buckets          4143 net/core/dev.c 		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
buckets           438 net/core/neighbour.c 	struct neighbour __rcu **buckets;
buckets           445 net/core/neighbour.c 		buckets = kzalloc(size, GFP_ATOMIC);
buckets           447 net/core/neighbour.c 		buckets = (struct neighbour __rcu **)
buckets           450 net/core/neighbour.c 		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
buckets           452 net/core/neighbour.c 	if (!buckets) {
buckets           456 net/core/neighbour.c 	ret->hash_buckets = buckets;
buckets           469 net/core/neighbour.c 	struct neighbour __rcu **buckets = nht->hash_buckets;
buckets           472 net/core/neighbour.c 		kfree(buckets);
buckets           474 net/core/neighbour.c 		kmemleak_free(buckets);
buckets           475 net/core/neighbour.c 		free_pages((unsigned long)buckets, get_order(size));
buckets           533 net/core/sock_map.c 	struct bpf_htab_bucket *buckets;
buckets           548 net/core/sock_map.c 	return &htab->buckets[hash & (htab->buckets_num - 1)];
buckets           841 net/core/sock_map.c 	htab->buckets = bpf_map_area_alloc(htab->buckets_num *
buckets           844 net/core/sock_map.c 	if (!htab->buckets) {
buckets           850 net/core/sock_map.c 		INIT_HLIST_HEAD(&htab->buckets[i].head);
buckets           851 net/core/sock_map.c 		raw_spin_lock_init(&htab->buckets[i].lock);
buckets           891 net/core/sock_map.c 	bpf_map_area_free(htab->buckets);
buckets            64 net/netfilter/ipvs/ip_vs_dh.c 	struct ip_vs_dh_bucket		buckets[IP_VS_DH_TAB_SIZE];
buckets            90 net/netfilter/ipvs/ip_vs_dh.c 	return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest);
buckets           106 net/netfilter/ipvs/ip_vs_dh.c 	b = &s->buckets[0];
buckets           140 net/netfilter/ipvs/ip_vs_dh.c 	b = &s->buckets[0];
buckets            70 net/netfilter/ipvs/ip_vs_sh.c 	struct ip_vs_sh_bucket		buckets[IP_VS_SH_TAB_SIZE];
buckets           108 net/netfilter/ipvs/ip_vs_sh.c 	struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest);
buckets           130 net/netfilter/ipvs/ip_vs_sh.c 	dest = rcu_dereference(s->buckets[ihash].dest);
buckets           145 net/netfilter/ipvs/ip_vs_sh.c 		dest = rcu_dereference(s->buckets[hash].dest);
buckets           172 net/netfilter/ipvs/ip_vs_sh.c 	b = &s->buckets[0];
buckets           216 net/netfilter/ipvs/ip_vs_sh.c 	b = &s->buckets[0];
buckets          1224 net/netfilter/nf_conntrack_core.c 	unsigned int i, goal, buckets = 0, expired_count = 0;
buckets          1296 net/netfilter/nf_conntrack_core.c 	} while (++buckets < goal);
buckets           413 net/netfilter/nft_set_hash.c 	u32				buckets;
buckets           431 net/netfilter/nft_set_hash.c 	hash = reciprocal_scale(hash, priv->buckets);
buckets           451 net/netfilter/nft_set_hash.c 	hash = reciprocal_scale(hash, priv->buckets);
buckets           471 net/netfilter/nft_set_hash.c 	hash = reciprocal_scale(hash, priv->buckets);
buckets           495 net/netfilter/nft_set_hash.c 	hash = reciprocal_scale(hash, priv->buckets);
buckets           577 net/netfilter/nft_set_hash.c 	for (i = 0; i < priv->buckets; i++) {
buckets           608 net/netfilter/nft_set_hash.c 	priv->buckets = nft_hash_buckets(desc->size);
buckets           621 net/netfilter/nft_set_hash.c 	for (i = 0; i < priv->buckets; i++) {
buckets           137 net/openvswitch/flow_table.c 	kvfree(ti->buckets);
buckets           149 net/openvswitch/flow_table.c 	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
buckets           151 net/openvswitch/flow_table.c 	if (!ti->buckets) {
buckets           157 net/openvswitch/flow_table.c 		INIT_HLIST_HEAD(&ti->buckets[i]);
buckets           215 net/openvswitch/flow_table.c 		struct hlist_head *head = &ti->buckets[i];
buckets           260 net/openvswitch/flow_table.c 		head = &ti->buckets[*bucket];
buckets           279 net/openvswitch/flow_table.c 	return &ti->buckets[hash & (ti->n_buckets - 1)];
buckets           312 net/openvswitch/flow_table.c 		struct hlist_head *head = &old->buckets[i];
buckets            26 net/openvswitch/flow_table.h 	struct hlist_head *buckets;
buckets           128 net/sched/sch_hhf.c 	struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
buckets           355 net/sched/sch_hhf.c 	bucket = &q->buckets[WDRR_BUCKET_FOR_HH];
buckets           357 net/sched/sch_hhf.c 		bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH];
buckets           368 net/sched/sch_hhf.c 	return bucket - q->buckets;
buckets           381 net/sched/sch_hhf.c 	bucket = &q->buckets[idx];
buckets           435 net/sched/sch_hhf.c 		int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ?
buckets           645 net/sched/sch_hhf.c 			struct wdrr_bucket *bucket = q->buckets + i;
buckets            39 tools/lib/bpf/hashmap.c 	map->buckets = NULL;
buckets            59 tools/lib/bpf/hashmap.c 	free(map->buckets);
buckets           112 tools/lib/bpf/hashmap.c 	free(map->buckets);
buckets           113 tools/lib/bpf/hashmap.c 	map->buckets = new_buckets;
buckets           125 tools/lib/bpf/hashmap.c 	if (!map->buckets)
buckets           128 tools/lib/bpf/hashmap.c 	for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
buckets           188 tools/lib/bpf/hashmap.c 	hashmap_add_entry(&map->buckets[h], entry);
buckets            40 tools/lib/bpf/hashmap.h 	struct hashmap_entry **buckets;
buckets            50 tools/lib/bpf/hashmap.h 	.buckets = NULL,			\
buckets           140 tools/lib/bpf/hashmap.h 		for (cur = map->buckets[bkt]; cur; cur = cur->next)
buckets           152 tools/lib/bpf/hashmap.h 		for (cur = map->buckets[bkt];				    \
buckets           165 tools/lib/bpf/hashmap.h 		     map->buckets ? map->buckets[bkt] : NULL; });	    \
buckets           173 tools/lib/bpf/hashmap.h 		     cur = map->buckets ? map->buckets[bkt] : NULL; });	    \
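
Finally, the tools/lib/bpf/hashmap.h hits show iteration as a nested walk: an outer loop over the bucket slots and an inner loop following each entry's next pointer, which is what libbpf's hashmap__for_each_entry macros expand to. A self-contained sketch of that walk, with illustrative types rather than libbpf's:

/* Hedged sketch of the bucket walk behind the iteration macros above:
 * visit each chain head, then follow ->next. Illustrative types. */
#include <stdio.h>
#include <stddef.h>

struct hentry { struct hentry *next; int val; };

#define NBKT 4

int main(void)
{
        struct hentry e1 = { NULL, 10 }, e2 = { NULL, 20 }, e3 = { NULL, 30 };
        struct hentry *buckets[NBKT] = { NULL };

        /* chain e2 behind e1 in bucket 1; e3 alone in bucket 3 */
        e1.next = &e2;
        buckets[1] = &e1;
        buckets[3] = &e3;

        for (size_t bkt = 0; bkt < NBKT; bkt++)
                for (struct hentry *cur = buckets[bkt]; cur; cur = cur->next)
                        printf("bucket %zu: %d\n", bkt, cur->val);
        return 0;
}
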