Lines Matching refs:tbl
145 struct bucket_table __rcu *tbl; member
162 struct bucket_table *tbl; member
205 static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, in rht_bucket_index() argument
208 return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); in rht_bucket_index()
212 struct rhashtable *ht, const struct bucket_table *tbl, in rht_key_hashfn() argument
219 hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); in rht_key_hashfn()
224 hash = params.hashfn(key, key_len, tbl->hash_rnd); in rht_key_hashfn()
226 hash = jhash(key, key_len, tbl->hash_rnd); in rht_key_hashfn()
229 tbl->hash_rnd); in rht_key_hashfn()
234 hash = params.hashfn(key, key_len, tbl->hash_rnd); in rht_key_hashfn()
236 hash = jhash(key, key_len, tbl->hash_rnd); in rht_key_hashfn()
239 return rht_bucket_index(tbl, hash); in rht_key_hashfn()
243 struct rhashtable *ht, const struct bucket_table *tbl, in rht_head_hashfn() argument
249 rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?: in rht_head_hashfn()
251 tbl->hash_rnd)) : in rht_head_hashfn()
252 rht_key_hashfn(ht, tbl, ptr + params.key_offset, params); in rht_head_hashfn()
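Taken together, rht_bucket_index(), rht_key_hashfn() and rht_head_hashfn() above map an object to a bucket in two steps: hash the key (through params.hashfn, jhash(), or obj_hashfn() when the object is hashed as a whole), then reduce the hash to a bucket index by discarding the reserved low bits and masking against the power-of-two table size. A minimal userspace sketch of that final reduction; the RHT_HASH_RESERVED_SPACE value below is a stand-in for illustration, not the constant from the header:

#include <stdio.h>

/* Stand-in only; the real constant is defined in the rhashtable header. */
#define RHT_HASH_RESERVED_SPACE 5

/* Mirrors rht_bucket_index(): drop the reserved low bits, then mask with
 * (size - 1), which selects a bucket only because size is a power of two. */
static unsigned int bucket_index(unsigned int table_size, unsigned int hash)
{
        return (hash >> RHT_HASH_RESERVED_SPACE) & (table_size - 1);
}

int main(void)
{
        unsigned int size = 64;                 /* power-of-two table size */
        unsigned int hash = 0x12345678u;        /* pretend hashfn() output */

        printf("hash 0x%08x -> bucket %u of %u\n",
               hash, bucket_index(size, hash), size);
        return 0;
}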
261 const struct bucket_table *tbl) in rht_grow_above_75() argument
264 return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) && in rht_grow_above_75()
265 (!ht->p.max_size || tbl->size < ht->p.max_size); in rht_grow_above_75()
274 const struct bucket_table *tbl) in rht_shrink_below_30() argument
277 return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) && in rht_shrink_below_30()
278 tbl->size > ht->p.min_size; in rht_shrink_below_30()
287 const struct bucket_table *tbl) in rht_grow_above_100() argument
289 return atomic_read(&ht->nelems) > tbl->size && in rht_grow_above_100()
290 (!ht->p.max_size || tbl->size < ht->p.max_size); in rht_grow_above_100()
299 const struct bucket_table *tbl) in rht_grow_above_max() argument
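The occupancy predicates above encode the resize policy: grow once the element count passes three quarters of the table size (and max_size still allows growth), shrink once it falls below 30% (while staying above min_size), and escalate when the count exceeds the table size outright. A self-contained sketch of the same arithmetic; struct sketch_ht and struct sketch_tbl are invented stand-ins for the real structures:

#include <stdbool.h>
#include <stdio.h>

struct sketch_tbl { unsigned int size; };
struct sketch_ht  { unsigned int nelems, min_size, max_size; };

/* Mirrors rht_grow_above_75(): more than 3/4 full and still below max_size. */
static bool grow_above_75(const struct sketch_ht *ht, const struct sketch_tbl *tbl)
{
        return ht->nelems > (tbl->size / 4 * 3) &&
               (!ht->max_size || tbl->size < ht->max_size);
}

/* Mirrors rht_shrink_below_30(): under 3/10 full and still above min_size. */
static bool shrink_below_30(const struct sketch_ht *ht, const struct sketch_tbl *tbl)
{
        return ht->nelems < (tbl->size * 3 / 10) &&
               tbl->size > ht->min_size;
}

int main(void)
{
        struct sketch_tbl tbl = { .size = 64 };
        struct sketch_ht ht = { .nelems = 49, .min_size = 4, .max_size = 0 };

        printf("grow?   %d\n", grow_above_75(&ht, &tbl));   /* 49 > 48  -> 1 */
        printf("shrink? %d\n", shrink_below_30(&ht, &tbl)); /* 49 < 19 fails -> 0 */
        return 0;
}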
318 static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl, in rht_bucket_lock() argument
321 return &tbl->locks[hash & tbl->locks_mask]; in rht_bucket_lock()
326 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
333 static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, in lockdep_rht_bucket_is_held() argument
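rht_bucket_lock() above does not keep one spinlock per bucket: it indexes a smaller array of striped locks with hash & locks_mask, so several buckets share each lock. A small userspace sketch of that striping, with an arbitrary stripe count chosen for illustration:

#include <stdio.h>

#define NR_LOCKS 8                      /* arbitrary power-of-two stripe count */

struct sketch_tbl {
        int locks[NR_LOCKS];            /* stands in for the spinlock array */
        unsigned int locks_mask;        /* NR_LOCKS - 1 */
};

/* Mirrors rht_bucket_lock(): many bucket hashes map onto one lock stripe. */
static int *bucket_lock(struct sketch_tbl *tbl, unsigned int hash)
{
        return &tbl->locks[hash & tbl->locks_mask];
}

int main(void)
{
        struct sketch_tbl tbl = { .locks_mask = NR_LOCKS - 1 };

        /* Hashes 3 and 11 collide on a stripe when there are 8 locks. */
        printf("same lock? %d\n", bucket_lock(&tbl, 3) == bucket_lock(&tbl, 11));
        return 0;
}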
347 int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
366 #define rht_dereference_bucket(p, tbl, hash) \ argument
367 rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
369 #define rht_dereference_bucket_rcu(p, tbl, hash) \ argument
370 rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
382 #define rht_for_each_continue(pos, head, tbl, hash) \ argument
383 for (pos = rht_dereference_bucket(head, tbl, hash); \
385 pos = rht_dereference_bucket((pos)->next, tbl, hash))
393 #define rht_for_each(pos, tbl, hash) \ argument
394 rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
405 #define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \ argument
406 for (pos = rht_dereference_bucket(head, tbl, hash); \
408 pos = rht_dereference_bucket((pos)->next, tbl, hash))
418 #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ argument
419 rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
420 tbl, hash, member)
434 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ argument
435 for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
437 rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
441 rht_dereference_bucket(pos->next, tbl, hash) : NULL)
454 #define rht_for_each_rcu_continue(pos, head, tbl, hash) \ argument
456 pos = rht_dereference_bucket_rcu(head, tbl, hash); \
470 #define rht_for_each_rcu(pos, tbl, hash) \ argument
471 rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
486 #define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \ argument
488 pos = rht_dereference_bucket_rcu(head, tbl, hash); \
490 pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
504 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ argument
505 rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
506 tbl, hash, member)
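The traversal macros above all reduce to the same pattern: load the bucket head, then follow ->next using the lockdep- or RCU-checked dereference as appropriate. A hedged kernel-context sketch of the RCU variant; struct my_obj and its fields are invented for illustration, and the caller is assumed to be inside rcu_read_lock():

#include <linux/rhashtable.h>

/* Hypothetical object embedding the hash chain link. */
struct my_obj {
        u32 key;
        struct rhash_head node;
};

/* Walk one bucket looking for a key; the caller holds rcu_read_lock(). */
static struct my_obj *find_in_bucket(const struct bucket_table *tbl,
                                     unsigned int hash, u32 key)
{
        struct my_obj *obj;
        struct rhash_head *pos;

        rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
                if (obj->key == key)
                        return obj;
        }
        return NULL;
}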
536 const struct bucket_table *tbl; in rhashtable_lookup_fast() local
542 tbl = rht_dereference_rcu(ht->tbl, ht); in rhashtable_lookup_fast()
544 hash = rht_key_hashfn(ht, tbl, key, params); in rhashtable_lookup_fast()
545 rht_for_each_rcu(he, tbl, hash) { in rhashtable_lookup_fast()
557 tbl = rht_dereference_rcu(tbl->future_tbl, ht); in rhashtable_lookup_fast()
558 if (unlikely(tbl)) in rhashtable_lookup_fast()
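rhashtable_lookup_fast() above wraps that traversal: it hashes the key, walks the bucket, and chases future_tbl when nothing matches, so lookups stay correct while a rehash is in flight. A hedged usage sketch; the object type, params, and table below are assumptions for illustration, not part of the listing:

#include <linux/rhashtable.h>

/* Hypothetical object: fixed 4-byte key, chain link embedded as .node. */
struct my_obj {
        u32 key;
        struct rhash_head node;
};

static const struct rhashtable_params my_params = {
        .key_len        = sizeof(u32),
        .key_offset     = offsetof(struct my_obj, key),
        .head_offset    = offsetof(struct my_obj, node),
};

/* Assumed to be initialised elsewhere with rhashtable_init(&my_ht, &my_params). */
static struct rhashtable my_ht;

static bool my_key_is_present(u32 key)
{
        struct my_obj *obj;
        bool found;

        rcu_read_lock();        /* keeps the returned object from vanishing under us */
        obj = rhashtable_lookup_fast(&my_ht, &key, my_params);
        found = obj != NULL;
        rcu_read_unlock();

        return found;
}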
574 struct bucket_table *tbl, *new_tbl; in __rhashtable_insert_fast() local
584 tbl = rht_dereference_rcu(ht->tbl, ht); in __rhashtable_insert_fast()
590 hash = rht_head_hashfn(ht, tbl, obj, params); in __rhashtable_insert_fast()
591 lock = rht_bucket_lock(tbl, hash); in __rhashtable_insert_fast()
594 if (tbl->rehash <= hash) in __rhashtable_insert_fast()
598 tbl = rht_dereference_rcu(tbl->future_tbl, ht); in __rhashtable_insert_fast()
601 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); in __rhashtable_insert_fast()
603 tbl = rhashtable_insert_slow(ht, key, obj, new_tbl); in __rhashtable_insert_fast()
604 if (!IS_ERR_OR_NULL(tbl)) in __rhashtable_insert_fast()
607 err = PTR_ERR(tbl); in __rhashtable_insert_fast()
612 if (unlikely(rht_grow_above_max(ht, tbl))) in __rhashtable_insert_fast()
615 if (unlikely(rht_grow_above_100(ht, tbl))) { in __rhashtable_insert_fast()
618 err = rhashtable_insert_rehash(ht, tbl); in __rhashtable_insert_fast()
628 rht_for_each(head, tbl, hash) { in __rhashtable_insert_fast()
640 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); in __rhashtable_insert_fast()
644 rcu_assign_pointer(tbl->buckets[hash], obj); in __rhashtable_insert_fast()
647 if (rht_grow_above_75(ht, tbl)) in __rhashtable_insert_fast()
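__rhashtable_insert_fast() above is the shared insert path: take the bucket lock in the oldest table still covering the hash, defer to rhashtable_insert_slow() when a future_tbl exists, enforce the growth limits, link the new object at the bucket head, and check rht_grow_above_75() so a resize can be scheduled. Callers normally reach it through a thin wrapper such as rhashtable_insert_fast(); a hedged sketch of that, reusing the assumed setup from the lookup sketch:

#include <linux/errno.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

/* Same assumed my_obj/my_ht/my_params as in the lookup sketch above. */
struct my_obj {
        u32 key;
        struct rhash_head node;
};
extern struct rhashtable my_ht;
extern const struct rhashtable_params my_params;

static int my_insert(u32 key)
{
        struct my_obj *obj;
        int err;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;
        obj->key = key;

        err = rhashtable_insert_fast(&my_ht, &obj->node, my_params);
        if (err)        /* negative errno, e.g. -ENOMEM, or -E2BIG if rht_grow_above_max() trips */
                kfree(obj);
        return err;
}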
746 struct rhashtable *ht, struct bucket_table *tbl, in __rhashtable_remove_fast() argument
755 hash = rht_head_hashfn(ht, tbl, obj, params); in __rhashtable_remove_fast()
756 lock = rht_bucket_lock(tbl, hash); in __rhashtable_remove_fast()
760 pprev = &tbl->buckets[hash]; in __rhashtable_remove_fast()
761 rht_for_each(he, tbl, hash) { in __rhashtable_remove_fast()
796 struct bucket_table *tbl; in rhashtable_remove_fast() local
801 tbl = rht_dereference_rcu(ht->tbl, ht); in rhashtable_remove_fast()
808 while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) && in rhashtable_remove_fast()
809 (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) in rhashtable_remove_fast()
817 rht_shrink_below_30(ht, tbl))) in rhashtable_remove_fast()
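rhashtable_remove_fast() mirrors the insert path: __rhashtable_remove_fast() unlinks the object under the bucket lock, the wrapper retries against future_tbl until the unlink succeeds, and a shrink is considered once occupancy drops below 30%. A hedged removal sketch under the same assumed setup; the embedded rcu_head is added so the object is freed only after concurrent RCU readers have finished:

#include <linux/rhashtable.h>
#include <linux/slab.h>

/* Same assumed setup as the previous sketches, plus an rcu_head for deferred freeing. */
struct my_obj {
        u32 key;
        struct rhash_head node;
        struct rcu_head rcu;
};
extern struct rhashtable my_ht;
extern const struct rhashtable_params my_params;

static void my_remove(struct my_obj *obj)
{
        /* Returns 0 when unlinked, -ENOENT when the object was not in the table. */
        if (rhashtable_remove_fast(&my_ht, &obj->node, my_params) == 0)
                kfree_rcu(obj, rcu);    /* defer the free past RCU readers */
}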