/linux-4.1.27/net/ceph/crush/ |
H A D | hash.c | 3 #include <linux/crush/hash.h> 7 * http://burtleburtle.net/bob/hash/evahash.html 26 __u32 hash = crush_hash_seed ^ a; crush_hash32_rjenkins1() local 30 crush_hashmix(b, x, hash); crush_hash32_rjenkins1() 31 crush_hashmix(y, a, hash); crush_hash32_rjenkins1() 32 return hash; crush_hash32_rjenkins1() 37 __u32 hash = crush_hash_seed ^ a ^ b; crush_hash32_rjenkins1_2() local 40 crush_hashmix(a, b, hash); crush_hash32_rjenkins1_2() 41 crush_hashmix(x, a, hash); crush_hash32_rjenkins1_2() 42 crush_hashmix(b, y, hash); crush_hash32_rjenkins1_2() 43 return hash; crush_hash32_rjenkins1_2() 48 __u32 hash = crush_hash_seed ^ a ^ b ^ c; crush_hash32_rjenkins1_3() local 51 crush_hashmix(a, b, hash); crush_hash32_rjenkins1_3() 52 crush_hashmix(c, x, hash); crush_hash32_rjenkins1_3() 53 crush_hashmix(y, a, hash); crush_hash32_rjenkins1_3() 54 crush_hashmix(b, x, hash); crush_hash32_rjenkins1_3() 55 crush_hashmix(y, c, hash); crush_hash32_rjenkins1_3() 56 return hash; crush_hash32_rjenkins1_3() 61 __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d; crush_hash32_rjenkins1_4() local 64 crush_hashmix(a, b, hash); crush_hash32_rjenkins1_4() 65 crush_hashmix(c, d, hash); crush_hash32_rjenkins1_4() 66 crush_hashmix(a, x, hash); crush_hash32_rjenkins1_4() 67 crush_hashmix(y, b, hash); crush_hash32_rjenkins1_4() 68 crush_hashmix(c, x, hash); crush_hash32_rjenkins1_4() 69 crush_hashmix(y, d, hash); crush_hash32_rjenkins1_4() 70 return hash; crush_hash32_rjenkins1_4() 76 __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e; crush_hash32_rjenkins1_5() local 79 crush_hashmix(a, b, hash); crush_hash32_rjenkins1_5() 80 crush_hashmix(c, d, hash); crush_hash32_rjenkins1_5() 81 crush_hashmix(e, x, hash); crush_hash32_rjenkins1_5() 82 crush_hashmix(y, a, hash); crush_hash32_rjenkins1_5() 83 crush_hashmix(b, x, hash); crush_hash32_rjenkins1_5() 84 crush_hashmix(y, c, hash); crush_hash32_rjenkins1_5() 85 crush_hashmix(d, x, hash); crush_hash32_rjenkins1_5() 86 crush_hashmix(y, e, hash); crush_hash32_rjenkins1_5() 87 return hash; crush_hash32_rjenkins1_5()
|
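The crush_hashmix() calls above come from include/linux/crush/hash.h, which this hit list does not show. Below is a minimal userspace sketch of the one-input variant; the 96-bit mix follows Bob Jenkins' published evahash, and the seed and filler constants (1315423911, 231232, 1232) are assumptions to verify against the tree.

#include <stdint.h>

/* Bob Jenkins' 96-bit mix, assumed to match the kernel's crush_hashmix(). */
#define crush_hashmix(a, b, c) do {                \
        a = a - b;  a = a - c;  a = a ^ (c >> 13); \
        b = b - c;  b = b - a;  b = b ^ (a << 8);  \
        c = c - a;  c = c - b;  c = c ^ (b >> 13); \
        a = a - b;  a = a - c;  a = a ^ (c >> 12); \
        b = b - c;  b = b - a;  b = b ^ (a << 16); \
        c = c - a;  c = c - b;  c = c ^ (b >> 5);  \
        a = a - b;  a = a - c;  a = a ^ (c >> 3);  \
        b = b - c;  b = b - a;  b = b ^ (a << 10); \
        c = c - a;  c = c - b;  c = c ^ (b >> 15); \
    } while (0)

#define CRUSH_HASH_SEED 1315423911u  /* assumed value of crush_hash_seed */

/* One-input variant, mirroring crush_hash32_rjenkins1() above: seed the
 * accumulator, then run two mixing rounds with fixed filler words. */
static uint32_t crush_hash32_rjenkins1(uint32_t a)
{
        uint32_t hash = CRUSH_HASH_SEED ^ a;
        uint32_t b = a;
        uint32_t x = 231232;  /* filler constants, assumed values */
        uint32_t y = 1232;

        crush_hashmix(b, x, hash);
        crush_hashmix(y, a, hash);
        return hash;
}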
/linux-4.1.27/net/batman-adv/ |
H A D | hash.c | 19 #include "hash.h" 21 /* clears the hash */ batadv_hash_init() 22 static void batadv_hash_init(struct batadv_hashtable *hash) batadv_hash_init() argument 26 for (i = 0; i < hash->size; i++) { batadv_hash_init() 27 INIT_HLIST_HEAD(&hash->table[i]); batadv_hash_init() 28 spin_lock_init(&hash->list_locks[i]); batadv_hash_init() 32 /* free only the hashtable and the hash itself. */ batadv_hash_destroy() 33 void batadv_hash_destroy(struct batadv_hashtable *hash) batadv_hash_destroy() argument 35 kfree(hash->list_locks); batadv_hash_destroy() 36 kfree(hash->table); batadv_hash_destroy() 37 kfree(hash); batadv_hash_destroy() 40 /* allocates and clears the hash */ batadv_hash_new() 43 struct batadv_hashtable *hash; batadv_hash_new() local 45 hash = kmalloc(sizeof(*hash), GFP_ATOMIC); batadv_hash_new() 46 if (!hash) batadv_hash_new() 49 hash->table = kmalloc_array(size, sizeof(*hash->table), GFP_ATOMIC); batadv_hash_new() 50 if (!hash->table) batadv_hash_new() 53 hash->list_locks = kmalloc_array(size, sizeof(*hash->list_locks), batadv_hash_new() 55 if (!hash->list_locks) batadv_hash_new() 58 hash->size = size; batadv_hash_new() 59 batadv_hash_init(hash); batadv_hash_new() 60 return hash; batadv_hash_new() 63 kfree(hash->table); batadv_hash_new() 65 kfree(hash); batadv_hash_new() 69 void batadv_hash_set_lock_class(struct batadv_hashtable *hash, batadv_hash_set_lock_class() argument 74 for (i = 0; i < hash->size; i++) batadv_hash_set_lock_class() 75 lockdep_set_class(&hash->list_locks[i], key); batadv_hash_set_lock_class()
|
H A D | hash.h | 38 spinlock_t *list_locks; /* spinlock for each hash list entry */ 42 /* allocates and clears the hash */ 46 void batadv_hash_set_lock_class(struct batadv_hashtable *hash, 49 /* free only the hashtable and the hash itself. */ 50 void batadv_hash_destroy(struct batadv_hashtable *hash); 52 /* remove the hash structure. if hashdata_free_cb != NULL, this function will be 53 * called to remove the elements inside of the hash. if you don't remove the 56 static inline void batadv_hash_delete(struct batadv_hashtable *hash, batadv_hash_delete() argument 65 for (i = 0; i < hash->size; i++) { batadv_hash_delete() 66 head = &hash->table[i]; batadv_hash_delete() 67 list_lock = &hash->list_locks[i]; batadv_hash_delete() 79 batadv_hash_destroy(hash); 83 * batadv_hash_bytes - hash some bytes and add them to the previous hash 84 * @hash: previous hash value 88 * Returns the new hash value. 90 static inline uint32_t batadv_hash_bytes(uint32_t hash, const void *data, batadv_hash_bytes() argument 97 hash += key[i]; batadv_hash_bytes() 98 hash += (hash << 10); batadv_hash_bytes() 99 hash ^= (hash >> 6); batadv_hash_bytes() 101 return hash; batadv_hash_bytes() 106 * @hash: storage hash table 107 * @compare: callback to determine if 2 hash elements are identical 108 * @choose: callback calculating the hash index 112 * Returns 0 on success, 1 if the element already is in the hash 115 static inline int batadv_hash_add(struct batadv_hashtable *hash, batadv_hash_add() argument 127 if (!hash) batadv_hash_add() 130 index = choose(data, hash->size); batadv_hash_add() 131 head = &hash->table[index]; batadv_hash_add() 132 list_lock = &hash->list_locks[index]; batadv_hash_add() 155 /* removes data from hash, if found. returns pointer to data on success, so you 160 static inline void *batadv_hash_remove(struct batadv_hashtable *hash, batadv_hash_remove() argument 170 index = choose(data, hash->size); batadv_hash_remove() 171 head = &hash->table[index]; batadv_hash_remove() 173 spin_lock_bh(&hash->list_locks[index]); hlist_for_each() 182 spin_unlock_bh(&hash->list_locks[index])
|
H A D | originator.h | 21 #include "hash.h" 72 /* hashfunction to choose an entry in a hash table of given size 73 * hash algorithm from http://en.wikipedia.org/wiki/Hash_table 78 uint32_t hash = 0; batadv_choose_orig() local 82 hash += key[i]; batadv_choose_orig() 83 hash += (hash << 10); batadv_choose_orig() 84 hash ^= (hash >> 6); batadv_choose_orig() 87 hash += (hash << 3); batadv_choose_orig() 88 hash ^= (hash >> 11); batadv_choose_orig() 89 hash += (hash << 15); batadv_choose_orig() 91 return hash % size; batadv_choose_orig() 97 struct batadv_hashtable *hash = bat_priv->orig_hash; batadv_orig_hash_find() local 102 if (!hash) batadv_orig_hash_find() 105 index = batadv_choose_orig(data, hash->size); batadv_orig_hash_find() 106 head = &hash->table[index]; batadv_orig_hash_find()
|
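The originator.h entry above shows the complete bucket-selection hash batman-adv uses: Jenkins' one-at-a-time loop over the key bytes, an avalanche, then a reduction modulo the table size. A standalone sketch; the 6-byte (ETH_ALEN, MAC address) key length is an assumption based on what batadv_choose_orig() hashes.

#include <stdint.h>
#include <stddef.h>

#define ETH_ALEN 6  /* MAC address length; assumed key size here */

/* Jenkins one-at-a-time, as in batadv_choose_orig() above. */
static uint32_t choose_orig(const void *data, uint32_t size)
{
        const unsigned char *key = data;
        uint32_t hash = 0;
        size_t i;

        for (i = 0; i < ETH_ALEN; i++) {
                hash += key[i];          /* mix in one byte */
                hash += (hash << 10);
                hash ^= (hash >> 6);
        }

        hash += (hash << 3);             /* final avalanche */
        hash ^= (hash >> 11);
        hash += (hash << 15);

        return hash % size;              /* bucket index */
}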
H A D | bridge_loop_avoidance.c | 19 #include "hash.h" 43 uint32_t hash = 0; batadv_choose_claim() local 45 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr)); batadv_choose_claim() 46 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid)); batadv_choose_claim() 48 hash += (hash << 3); batadv_choose_claim() 49 hash ^= (hash >> 11); batadv_choose_claim() 50 hash += (hash << 15); batadv_choose_claim() 52 return hash % size; batadv_choose_claim() 60 uint32_t hash = 0; batadv_choose_backbone_gw() local 62 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr)); batadv_choose_backbone_gw() 63 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid)); batadv_choose_backbone_gw() 65 hash += (hash << 3); batadv_choose_backbone_gw() 66 hash ^= (hash >> 11); batadv_choose_backbone_gw() 67 hash += (hash << 15); batadv_choose_backbone_gw() 69 return hash % size; batadv_choose_backbone_gw() 133 * looks for a claim in the hash, and returns it if found 140 struct batadv_hashtable *hash = bat_priv->bla.claim_hash; batadv_claim_hash_find() local 146 if (!hash) batadv_claim_hash_find() 149 index = batadv_choose_claim(data, hash->size); batadv_claim_hash_find() 150 head = &hash->table[index]; batadv_claim_hash_find() 169 * batadv_backbone_hash_find - looks for a claim in the hash 180 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; batadv_backbone_hash_find() local 186 if (!hash) batadv_backbone_hash_find() 192 index = batadv_choose_backbone_gw(&search_entry, hash->size); batadv_backbone_hash_find() 193 head = &hash->table[index]; batadv_backbone_hash_find() 216 struct batadv_hashtable *hash; batadv_bla_del_backbone_claims() local 221 spinlock_t *list_lock; /* protects write access to the hash lists */ batadv_bla_del_backbone_claims() 223 hash = backbone_gw->bat_priv->bla.claim_hash; batadv_bla_del_backbone_claims() 224 if (!hash) batadv_bla_del_backbone_claims() 227 for (i = 0; i < hash->size; i++) { batadv_bla_del_backbone_claims() 228 head = &hash->table[i]; batadv_bla_del_backbone_claims() 229 list_lock = &hash->list_locks[i]; batadv_bla_del_backbone_claims() 396 /* one for the hash, one for returning */ batadv_bla_get_backbone_gw() 405 /* hash failed, free the structure */ batadv_bla_get_backbone_gw() 464 struct batadv_hashtable *hash; batadv_bla_answer_request() local 478 hash = bat_priv->bla.claim_hash; batadv_bla_answer_request() 479 for (i = 0; i < hash->size; i++) { batadv_bla_answer_request() 480 head = &hash->table[i]; batadv_bla_answer_request() 549 * batadv_bla_add_claim - Adds a claim in the claim hash 580 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", batadv_bla_add_claim() 616 /* Delete a claim from the claim hash which has the 635 batadv_claim_free_ref(claim); /* reference from the hash is gone */ batadv_bla_del_claim() 993 struct batadv_hashtable *hash; batadv_bla_purge_backbone_gw() local 994 spinlock_t *list_lock; /* protects write access to the hash lists */ batadv_bla_purge_backbone_gw() 997 hash = bat_priv->bla.backbone_hash; batadv_bla_purge_backbone_gw() 998 if (!hash) batadv_bla_purge_backbone_gw() 1001 for (i = 0; i < hash->size; i++) { batadv_bla_purge_backbone_gw() 1002 head = &hash->table[i]; batadv_bla_purge_backbone_gw() 1003 list_lock = &hash->list_locks[i]; batadv_bla_purge_backbone_gw() 1036 * @now: whether the whole hash shall be wiped now 1047 struct batadv_hashtable *hash; batadv_bla_purge_claims() local 1050 hash = bat_priv->bla.claim_hash; batadv_bla_purge_claims() 1051 if (!hash) batadv_bla_purge_claims() 1054 for (i = 0; i < hash->size; i++) { batadv_bla_purge_claims() 1055 head = &hash->table[i]; batadv_bla_purge_claims() 1095 struct batadv_hashtable *hash; batadv_bla_update_orig_address() local 1113 hash = bat_priv->bla.backbone_hash; batadv_bla_update_orig_address() 1114 if (!hash) batadv_bla_update_orig_address() 1117 for (i = 0; i < hash->size; i++) { batadv_bla_update_orig_address() 1118 head = &hash->table[i]; batadv_bla_update_orig_address() 1149 struct batadv_hashtable *hash; batadv_bla_periodic_work() local 1166 hash = bat_priv->bla.backbone_hash; batadv_bla_periodic_work() 1167 if (!hash) batadv_bla_periodic_work() 1170 for (i = 0; i < hash->size; i++) { batadv_bla_periodic_work() 1171 head = &hash->table[i]; batadv_bla_periodic_work() 1211 /* The hash for claim and backbone hash receive the same key because they 1230 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n"); batadv_bla_init() 1356 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; batadv_bla_is_backbone_gw_orig() local 1364 if (!hash) batadv_bla_is_backbone_gw_orig() 1367 for (i = 0; i < hash->size; i++) { batadv_bla_is_backbone_gw_orig() 1368 head = &hash->table[i]; batadv_bla_is_backbone_gw_orig() 1628 struct batadv_hashtable *hash = bat_priv->bla.claim_hash; batadv_bla_claim_table_seq_print_text() local 1647 for (i = 0; i < hash->size; i++) { batadv_bla_claim_table_seq_print_text() 1648 head = &hash->table[i]; batadv_bla_claim_table_seq_print_text() 1672 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; batadv_bla_backbone_table_seq_print_text() local 1692 for (i = 0; i < hash->size; i++) { batadv_bla_backbone_table_seq_print_text() 1693 head = &hash->table[i]; batadv_bla_backbone_table_seq_print_text()
|
H A D | distributed-arp-table.c | 25 #include "hash.h" 83 spinlock_t *list_lock; /* protects write access to the hash lists */ __batadv_dat_purge() 89 if (!bat_priv->dat.hash) __batadv_dat_purge() 92 for (i = 0; i < bat_priv->dat.hash->size; i++) { __batadv_dat_purge() 93 head = &bat_priv->dat.hash->table[i]; __batadv_dat_purge() 94 list_lock = &bat_priv->dat.hash->list_locks[i]; __batadv_dat_purge() 114 * hash table 132 * batadv_compare_dat - comparing function used in the local DAT hash table 200 * batadv_hash_dat - compute the hash value for an IP address 201 * @data: data to hash 202 * @size: size of the hash table 204 * Returns the selected index in the hash table for the given data. 208 uint32_t hash = 0; batadv_hash_dat() local 211 hash = batadv_hash_bytes(hash, &dat->ip, sizeof(dat->ip)); batadv_hash_dat() 212 hash = batadv_hash_bytes(hash, &dat->vid, sizeof(dat->vid)); batadv_hash_dat() 214 hash += (hash << 3); batadv_hash_dat() 215 hash ^= (hash >> 11); batadv_hash_dat() 216 hash += (hash << 15); batadv_hash_dat() 218 return hash % size; batadv_hash_dat() 222 * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash 236 struct batadv_hashtable *hash = bat_priv->dat.hash; batadv_dat_entry_hash_find() local 239 if (!hash) batadv_dat_entry_hash_find() 245 index = batadv_hash_dat(&to_find, hash->size); batadv_dat_entry_hash_find() 246 head = &hash->table[index]; batadv_dat_entry_hash_find() 300 hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat, batadv_dat_entry_add() 305 /* remove the reference for the hash */ batadv_dat_entry_add() 444 /* this is a hash collision with the temporary selected node. Choose batadv_is_orig_node_eligible() 471 struct batadv_hashtable *hash = bat_priv->orig_hash; batadv_choose_next_candidate() local 483 for (i = 0; i < hash->size; i++) { batadv_choose_next_candidate() 484 head = &hash->table[i]; batadv_choose_next_candidate() 526 * closest values (from the LEFT, with wrap around if needed) then the hash 550 "dat_select_candidates(): IP=%pI4 hash(IP)=%u\n", &ip_dst, batadv_dat_select_candidates() 692 * batadv_dat_hash_free - free the local DAT hash table 697 if (!bat_priv->dat.hash) batadv_dat_hash_free() 702 batadv_hash_destroy(bat_priv->dat.hash); batadv_dat_hash_free() 704 bat_priv->dat.hash = NULL; batadv_dat_hash_free() 713 if (bat_priv->dat.hash) batadv_dat_init() 716 bat_priv->dat.hash = batadv_hash_new(1024); batadv_dat_init() 718 if (!bat_priv->dat.hash) batadv_dat_init() 745 * batadv_dat_cache_seq_print_text - print the local DAT hash table 753 struct batadv_hashtable *hash = bat_priv->dat.hash; batadv_dat_cache_seq_print_text() local 769 for (i = 0; i < hash->size; i++) { batadv_dat_cache_seq_print_text() 770 head = &hash->table[i]; batadv_dat_cache_seq_print_text()
|
H A D | network-coding.c | 22 #include "hash.h" 115 * batadv_nc_mesh_init - initialise coding hash table and start house keeping 344 * batadv_nc_purge_orig_hash - traverse entire originator hash to check if they 350 struct batadv_hashtable *hash = bat_priv->orig_hash; batadv_nc_purge_orig_hash() local 355 if (!hash) batadv_nc_purge_orig_hash() 359 for (i = 0; i < hash->size; i++) { batadv_nc_purge_orig_hash() 360 head = &hash->table[i]; batadv_nc_purge_orig_hash() 371 * batadv_nc_purge_paths - traverse all nc paths part of the hash and remove 374 * @hash: hash table containing the nc paths to check 381 struct batadv_hashtable *hash, batadv_nc_purge_paths() 388 spinlock_t *lock; /* Protects lists in hash */ batadv_nc_purge_paths() 391 for (i = 0; i < hash->size; i++) { batadv_nc_purge_paths() 392 head = &hash->table[i]; batadv_nc_purge_paths() 393 lock = &hash->list_locks[i]; batadv_nc_purge_paths() 430 * batadv_nc_hash_key_gen - computes the nc_path hash key 431 * @key: buffer to hold the final hash key 432 * @src: source ethernet mac address going into the hash key 433 * @dst: destination ethernet mac address going into the hash key 443 * batadv_nc_hash_choose - compute the hash value for an nc path 444 * @data: data to hash 445 * @size: size of the hash table 447 * Returns the selected index in the hash table for the given data. 452 uint32_t hash = 0; batadv_nc_hash_choose() local 454 hash = batadv_hash_bytes(hash, &nc_path->prev_hop, batadv_nc_hash_choose() 456 hash = batadv_hash_bytes(hash, &nc_path->next_hop, batadv_nc_hash_choose() 459 hash += (hash << 3); batadv_nc_hash_choose() 460 hash ^= (hash >> 11); batadv_nc_hash_choose() 461 hash += (hash << 15); batadv_nc_hash_choose() 463 return hash % size; batadv_nc_hash_choose() 467 * batadv_nc_hash_compare - comparing function used in the network coding hash 496 * @hash: hash table containing the nc path 502 batadv_nc_hash_find(struct batadv_hashtable *hash, batadv_nc_hash_find() argument 509 if (!hash) batadv_nc_hash_find() 512 index = batadv_nc_hash_choose(data, hash->size); batadv_nc_hash_find() 513 head = &hash->table[index]; batadv_nc_hash_find() 621 * @hash: to be processed hash table 628 struct batadv_hashtable *hash, batadv_nc_process_nc_paths() 639 if (!hash) batadv_nc_process_nc_paths() 642 /* Loop hash table bins */ batadv_nc_process_nc_paths() 643 for (i = 0; i < hash->size; i++) { batadv_nc_process_nc_paths() 644 head = &hash->table[i]; batadv_nc_process_nc_paths() 906 * @hash: hash table containing the nc path 914 struct batadv_hashtable *hash, batadv_nc_get_path() 924 nc_path = batadv_nc_hash_find(hash, (void *)&nc_path_key); batadv_nc_get_path() 950 /* Add nc_path to hash table */ batadv_nc_get_path() 951 hash_added = batadv_hash_add(hash, batadv_nc_hash_compare, batadv_nc_get_path() 1237 struct batadv_hashtable *hash = bat_priv->nc.coding_hash; batadv_nc_path_search() local 1240 if (!hash) batadv_nc_path_search() 1246 idx = batadv_nc_hash_choose(&nc_path_key, hash->size); batadv_nc_path_search() 1250 hlist_for_each_entry_rcu(nc_path, &hash->table[idx], hash_entry) { batadv_nc_path_search() 1704 struct batadv_hashtable *hash = bat_priv->nc.decoding_hash; batadv_nc_find_decoding_packet() local 1711 if (!hash) batadv_nc_find_decoding_packet() 1725 index = batadv_nc_hash_choose(&nc_path_key, hash->size); batadv_nc_find_decoding_packet() 1729 hlist_for_each_entry_rcu(nc_path, &hash->table[index], hash_entry) { batadv_nc_find_decoding_packet() 1849 struct batadv_hashtable *hash = bat_priv->orig_hash; batadv_nc_nodes_seq_print_text() local 1861 for (i = 0; i < hash->size; i++) { batadv_nc_nodes_seq_print_text() 1862 head = &hash->table[i]; batadv_nc_nodes_seq_print_text() 380 batadv_nc_purge_paths(struct batadv_priv *bat_priv, struct batadv_hashtable *hash, bool (*to_purge)(struct batadv_priv *, struct batadv_nc_path *)) batadv_nc_purge_paths() argument 627 batadv_nc_process_nc_paths(struct batadv_priv *bat_priv, struct batadv_hashtable *hash, bool (*process_fn)(struct batadv_priv *, struct batadv_nc_path *, struct batadv_nc_packet *)) batadv_nc_process_nc_paths() argument 913 batadv_nc_get_path(struct batadv_priv *bat_priv, struct batadv_hashtable *hash, uint8_t *src, uint8_t *dst) batadv_nc_get_path() argument
|
/linux-4.1.27/tools/include/linux/ |
H A D | hash.h | 1 #include "../../../include/linux/hash.h"
|
/linux-4.1.27/net/core/ |
H A D | secure_seq.c | 47 u32 hash[MD5_DIGEST_WORDS]; secure_tcpv6_sequence_number() local 51 memcpy(hash, saddr, 16); secure_tcpv6_sequence_number() 59 md5_transform(hash, secret); secure_tcpv6_sequence_number() 61 return seq_scale(hash[0]); secure_tcpv6_sequence_number() 69 u32 hash[MD5_DIGEST_WORDS]; secure_ipv6_port_ephemeral() local 73 memcpy(hash, saddr, 16); secure_ipv6_port_ephemeral() 80 md5_transform(hash, secret); secure_ipv6_port_ephemeral() 82 return hash[0]; secure_ipv6_port_ephemeral() 92 u32 hash[MD5_DIGEST_WORDS]; secure_tcp_sequence_number() local 95 hash[0] = (__force u32)saddr; secure_tcp_sequence_number() 96 hash[1] = (__force u32)daddr; secure_tcp_sequence_number() 97 hash[2] = ((__force u16)sport << 16) + (__force u16)dport; secure_tcp_sequence_number() 98 hash[3] = net_secret[15]; secure_tcp_sequence_number() 100 md5_transform(hash, net_secret); secure_tcp_sequence_number() 102 return seq_scale(hash[0]); secure_tcp_sequence_number() 107 u32 hash[MD5_DIGEST_WORDS]; secure_ipv4_port_ephemeral() local 110 hash[0] = (__force u32)saddr; secure_ipv4_port_ephemeral() 111 hash[1] = (__force u32)daddr; secure_ipv4_port_ephemeral() 112 hash[2] = (__force u32)dport ^ net_secret[14]; secure_ipv4_port_ephemeral() 113 hash[3] = net_secret[15]; secure_ipv4_port_ephemeral() 115 md5_transform(hash, net_secret); secure_ipv4_port_ephemeral() 117 return hash[0]; secure_ipv4_port_ephemeral() 126 u32 hash[MD5_DIGEST_WORDS]; secure_dccp_sequence_number() local 130 hash[0] = (__force u32)saddr; secure_dccp_sequence_number() 131 hash[1] = (__force u32)daddr; secure_dccp_sequence_number() 132 hash[2] = ((__force u16)sport << 16) + (__force u16)dport; secure_dccp_sequence_number() 133 hash[3] = net_secret[15]; secure_dccp_sequence_number() 135 md5_transform(hash, net_secret); secure_dccp_sequence_number() 137 seq = hash[0] | (((u64)hash[1]) << 32); secure_dccp_sequence_number() 150 u32 hash[MD5_DIGEST_WORDS]; secure_dccpv6_sequence_number() local 155 memcpy(hash, saddr, 16); secure_dccpv6_sequence_number() 163 md5_transform(hash, secret); secure_dccpv6_sequence_number() 165 seq = hash[0] | (((u64)hash[1]) << 32); secure_dccpv6_sequence_number()
|
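Every helper in secure_seq.c above follows the same shape: pack the connection tuple into the four digest words, fold the tail of a boot-time secret into the last words, run one md5_transform() round over the secret block, and use word 0 of the result. A sketch of the IPv4 ephemeral-port case; md5_transform() is the lib/md5.c routine listed further below, and the array sizes here are assumptions.

#include <stdint.h>

#define MD5_DIGEST_WORDS 4
#define MD5_MESSAGE_WORDS 16  /* assumed MD5_MESSAGE_BYTES / 4 */

/* Provided by lib/md5.c in the kernel; declared external for the sketch. */
void md5_transform(uint32_t *hash, const uint32_t *in);

/* Stands in for the kernel's boot-time random secret. */
static uint32_t net_secret[MD5_MESSAGE_WORDS];

/* Same shape as secure_ipv4_port_ephemeral() above. */
static uint32_t port_ephemeral(uint32_t saddr, uint32_t daddr, uint16_t dport)
{
        uint32_t hash[MD5_DIGEST_WORDS];

        hash[0] = saddr;
        hash[1] = daddr;
        hash[2] = (uint32_t)dport ^ net_secret[14];
        hash[3] = net_secret[15];

        md5_transform(hash, net_secret);  /* one MD5 compression round */

        return hash[0];
}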
H A D | flow_dissector.c | 285 u32 hash; __flow_hash_from_keys() local 287 /* get a consistent hash (same value on both flow directions) */ __flow_hash_from_keys() 295 hash = __flow_hash_3words((__force u32)keys->dst, __flow_hash_from_keys() 298 if (!hash) __flow_hash_from_keys() 299 hash = 1; __flow_hash_from_keys() 301 return hash; __flow_hash_from_keys() 311 * __skb_get_hash: calculate a flow hash based on src/dst addresses 312 * and src/dst port numbers. Sets hash in skb to non-zero hash value 313 * on success, zero indicates no valid hash. Also, sets l4_hash in skb 314 * if hash is a canonical 4-tuple hash over transport ports. 328 skb->hash = __flow_hash_from_keys(&keys); __skb_get_hash() 333 * Returns a Tx hash based on the given packet descriptor a Tx queues' number 339 u32 hash; __skb_tx_hash() local 344 hash = skb_get_rx_queue(skb); __skb_tx_hash() 345 while (unlikely(hash >= num_tx_queues)) __skb_tx_hash() 346 hash -= num_tx_queues; __skb_tx_hash() 347 return hash; __skb_tx_hash()
|
/linux-4.1.27/drivers/staging/skein/ |
H A D | Makefile | 2 # Makefile for the skein secure hash algorithm
|
H A D | skein_block.h | 3 ** Implementation of the Skein hash function.
|
H A D | skein_api.h | 50 * struct skein_ctx ctx; // a Skein hash or MAC context 55 * // Initialize the context to set the requested hash length in bits 56 * // here request a output hash size of 31 bits (Skein supports variable 64 * // Now get the result of the Skein hash. The output buffer must be 72 * it for creation of another hash with the same Skein state size and output 137 * Number of MAC hash bits to compute 172 * Number of MAC hash bits to compute 195 * Update the hash with a message bit string. 211 * Finalize Skein and return the hash. 218 * @param hash 219 * Pointer to buffer that receives the hash. The buffer must be large 225 int skein_final(struct skein_ctx *ctx, u8 *hash);
|
H A D | skein_base.c | 3 ** Implementation of the Skein hash function. 31 ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */ skein_256_init() 57 /* hash result length in bits */ skein_256_init() 70 /* Set up to process the data message portion of the hash (default) */ skein_256_init() 77 /* init the context for a MAC and/or tree hash operation */ 98 /* set output hash bit count = state size */ skein_256_init_ext() 104 /* hash the key */ skein_256_init_ext() 115 /* output hash bit count */ skein_256_init_ext() 122 /* hash result length in bits */ skein_256_init_ext() 124 /* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */ skein_256_init_ext() 131 /* Set up to process the data message portion of the hash (default) */ skein_256_init_ext() 193 /* finalize the hash computation and output the result */ skein_256_final() 253 ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */ skein_512_init() 279 /* hash result length in bits */ skein_512_init() 296 /* Set up to process the data message portion of the hash (default) */ skein_512_init() 303 /* init the context for a MAC and/or tree hash operation */ 324 /* set output hash bit count = state size */ skein_512_init_ext() 330 /* hash the key */ skein_512_init_ext() 341 ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */ skein_512_init_ext() 347 /* hash result length in bits */ skein_512_init_ext() 349 /* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */ skein_512_init_ext() 356 /* Set up to process the data message portion of the hash (default) */ skein_512_init_ext() 418 /* finalize the hash computation and output the result */ skein_512_final() 478 ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */ skein_1024_init() 501 /* hash result length in bits */ skein_1024_init() 515 /* Set up to process the data message portion of the hash (default) */ skein_1024_init() 522 /* init the context for a MAC and/or tree hash operation */ 543 /* set output hash bit count = state size */ skein_1024_init_ext() 549 /* hash the key */ skein_1024_init_ext() 560 /* output hash bit count */ skein_1024_init_ext() 567 /* hash result length in bits */ skein_1024_init_ext() 569 /* tree hash config info (or SKEIN_CFG_TREE_INFO_SEQUENTIAL) */ skein_1024_init_ext() 576 /* Set up to process the data message portion of the hash (default) */ skein_1024_init_ext() 638 /* finalize the hash computation and output the result */ skein_1024_final() 688 /* finalize the hash computation and output the block, no OUTPUT stage */ skein_256_final_pad() 710 /* finalize the hash computation and output the block, no OUTPUT stage */ skein_512_final_pad() 732 /* finalize the hash computation and output the block, no OUTPUT stage */ skein_1024_final_pad()
|
/linux-4.1.27/fs/ext3/ |
H A D | hash.c | 2 * linux/fs/ext3/hash.c 35 /* The old legacy hash */ dx_hack_hash_unsigned() 38 __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; dx_hack_hash_unsigned() local 42 hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373)); dx_hack_hash_unsigned() 44 if (hash & 0x80000000) dx_hack_hash_unsigned() 45 hash -= 0x7fffffff; dx_hack_hash_unsigned() 47 hash0 = hash; dx_hack_hash_unsigned() 54 __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; dx_hack_hash_signed() local 58 hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373)); dx_hack_hash_signed() 60 if (hash & 0x80000000) dx_hack_hash_signed() 61 hash -= 0x7fffffff; dx_hack_hash_signed() 63 hash0 = hash; dx_hack_hash_signed() 125 * Returns the hash of a filename. If len is 0 and name is NULL, then 126 * this function can be used to test whether or not a hash version is 130 * uniquify a hash. If the seed is all zeros, then some default seed 133 * A particular hash version specifies whether or not the seed is 134 * represented, and whether or not the returned hash is 32 bits or 64 135 * bits. 32 bit hashes will return 0 for the minor hash. 139 __u32 hash; ext3fs_dirhash() local 147 /* Initialize the default seed for the hash checksum functions */ ext3fs_dirhash() 165 hash = dx_hack_hash_unsigned(name, len); ext3fs_dirhash() 168 hash = dx_hack_hash_signed(name, len); ext3fs_dirhash() 181 hash = buf[1]; ext3fs_dirhash() 193 hash = buf[0]; ext3fs_dirhash() 197 hinfo->hash = 0; ext3fs_dirhash() 200 hash = hash & ~1; ext3fs_dirhash() 201 if (hash == (EXT3_HTREE_EOF_32BIT << 1)) ext3fs_dirhash() 202 hash = (EXT3_HTREE_EOF_32BIT - 1) << 1; ext3fs_dirhash() 203 hinfo->hash = hash; ext3fs_dirhash()
|
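The legacy ext3 directory hash is mostly visible in the entry above. A standalone sketch; the hash1/hash0 rotation between iterations and the final left shift are not in the listing, are filled in from memory, and should be checked against fs/ext3/hash.c.

#include <stdint.h>

/* The "old legacy" hash, unsigned-char variant (dx_hack_hash_unsigned). */
static uint32_t dx_hack_hash_unsigned(const char *name, int len)
{
        uint32_t hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
        const unsigned char *ucp = (const unsigned char *)name;

        while (len--) {
                hash = hash1 + (hash0 ^ (((int)*ucp++) * 7152373));

                if (hash & 0x80000000)
                        hash -= 0x7fffffff;  /* keep the value positive */
                hash1 = hash0;               /* assumed rotation step */
                hash0 = hash;
        }
        return hash0 << 1;                   /* assumed final shift */
}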
H A D | Makefile | 8 ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
|
H A D | dir.c | 218 * These functions convert from the major/minor hash to an f_pos 269 * of the filename hash value instead of the byte offset. 271 * Because we may return a 64-bit hash that is well beyond s_maxbytes, 272 * we need to pass the max hash as the maximum allowable offset in 293 * the directory entry in hash order. 296 __u32 hash; member in struct:fname 346 int ext3_htree_store_dirent(struct file *dir_file, __u32 hash, ext3_htree_store_dirent() argument 363 new_fn->hash = hash; ext3_htree_store_dirent() 376 * If the hash and minor hash match up, then we put ext3_htree_store_dirent() 379 if ((new_fn->hash == fname->hash) && ext3_htree_store_dirent() 386 if (new_fn->hash < fname->hash) ext3_htree_store_dirent() 388 else if (new_fn->hash > fname->hash) ext3_htree_store_dirent() 406 * one entry on the linked list, unless there are 62 bit hash collisions.) 419 ctx->pos = hash2pos(file, fname->hash, fname->minor_hash); call_filldir() 459 * If there are any leftover names on the hash collision ext3_dx_readdir() 494 info->curr_hash = fname->hash; ext3_dx_readdir() 503 info->curr_hash = fname->hash; ext3_dx_readdir()
|
H A D | xattr.c | 1135 __u32 hash = le32_to_cpu(BHDR(bh)->h_hash); ext3_xattr_cache_insert() local 1144 error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash); ext3_xattr_cache_insert() 1152 ea_bdebug(bh, "inserting [%x]", (int)hash); ext3_xattr_cache_insert() 1209 __u32 hash = le32_to_cpu(header->h_hash); ext3_xattr_cache_find() local 1214 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); ext3_xattr_cache_find() 1217 hash); ext3_xattr_cache_find() 1242 ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash); ext3_xattr_cache_find() 1253 * Compute the hash of an extended attribute. 1258 __u32 hash = 0; ext3_xattr_hash_entry() local 1263 hash = (hash << NAME_HASH_SHIFT) ^ ext3_xattr_hash_entry() 1264 (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ ext3_xattr_hash_entry() 1273 hash = (hash << VALUE_HASH_SHIFT) ^ ext3_xattr_hash_entry() 1274 (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^ ext3_xattr_hash_entry() 1278 entry->e_hash = cpu_to_le32(hash); ext3_xattr_hash_entry() 1289 * Re-compute the extended attribute hash value after an entry has changed. 1295 __u32 hash = 0; ext3_xattr_rehash() local 1301 /* Block is not shared if an entry's hash value == 0 */ ext3_xattr_rehash() 1302 hash = 0; ext3_xattr_rehash() 1305 hash = (hash << BLOCK_HASH_SHIFT) ^ ext3_xattr_rehash() 1306 (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^ ext3_xattr_rehash() 1310 header->h_hash = cpu_to_le32(hash); ext3_xattr_rehash()
|
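ext3_xattr_hash_entry() above folds each attribute-name byte in with a rotate-left-and-XOR. A sketch of that per-byte step; the shift width of 5 (NAME_HASH_SHIFT) is recalled from fs/ext3/xattr.c and is an assumption, since the listing does not show the macro values.

#include <stdint.h>
#include <stddef.h>

#define NAME_HASH_SHIFT 5  /* assumed; verify against fs/ext3/xattr.c */

/* Rotate-and-XOR fold over the attribute name, the per-byte step that
 * ext3_xattr_hash_entry() applies above. */
static uint32_t xattr_name_hash(const char *name, size_t name_len)
{
        uint32_t hash = 0;
        size_t n;

        for (n = 0; n < name_len; n++)
                hash = (hash << NAME_HASH_SHIFT) ^
                       (hash >> (8 * sizeof(hash) - NAME_HASH_SHIFT)) ^
                       (unsigned char)name[n];

        return hash;
}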
/linux-4.1.27/fs/ext4/ |
H A D | hash.c | 2 * linux/fs/ext4/hash.c 36 /* The old legacy hash */ dx_hack_hash_unsigned() 39 __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; dx_hack_hash_unsigned() local 43 hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373)); dx_hack_hash_unsigned() 45 if (hash & 0x80000000) dx_hack_hash_unsigned() 46 hash -= 0x7fffffff; dx_hack_hash_unsigned() 48 hash0 = hash; dx_hack_hash_unsigned() 55 __u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9; dx_hack_hash_signed() local 59 hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373)); dx_hack_hash_signed() 61 if (hash & 0x80000000) dx_hack_hash_signed() 62 hash -= 0x7fffffff; dx_hack_hash_signed() 64 hash0 = hash; dx_hack_hash_signed() 126 * Returns the hash of a filename. If len is 0 and name is NULL, then 127 * this function can be used to test whether or not a hash version is 131 * uniquify a hash. If the seed is all zeros, then some default seed 134 * A particular hash version specifies whether or not the seed is 135 * represented, and whether or not the returned hash is 32 bits or 64 136 * bits. 32 bit hashes will return 0 for the minor hash. 140 __u32 hash; ext4fs_dirhash() local 148 /* Initialize the default seed for the hash checksum functions */ ext4fs_dirhash() 166 hash = dx_hack_hash_unsigned(name, len); ext4fs_dirhash() 169 hash = dx_hack_hash_signed(name, len); ext4fs_dirhash() 182 hash = buf[1]; ext4fs_dirhash() 194 hash = buf[0]; ext4fs_dirhash() 198 hinfo->hash = 0; ext4fs_dirhash() 201 hash = hash & ~1; ext4fs_dirhash() 202 if (hash == (EXT4_HTREE_EOF_32BIT << 1)) ext4fs_dirhash() 203 hash = (EXT4_HTREE_EOF_32BIT - 1) << 1; ext4fs_dirhash() 204 hinfo->hash = hash; ext4fs_dirhash()
|
H A D | Makefile | 8 ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
|
H A D | dir.c | 292 * These functions convert from the major/minor hash to an f_pos 342 * directories, where the "offset" is in terms of the filename hash 345 * Because we may return a 64-bit hash that is well beyond offset limits, 346 * we need to pass the max hash as the maximum allowable offset in 366 * the directory entry in hash order. 369 __u32 hash; member in struct:fname 424 int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, ext4_htree_store_dirent() argument 442 new_fn->hash = hash; ext4_htree_store_dirent() 455 * If the hash and minor hash match up, then we put ext4_htree_store_dirent() 458 if ((new_fn->hash == fname->hash) && ext4_htree_store_dirent() 465 if (new_fn->hash < fname->hash) ext4_htree_store_dirent() 467 else if (new_fn->hash > fname->hash) ext4_htree_store_dirent() 485 * one entry on the linked list, unless there are 62 bit hash collisions.) 500 ctx->pos = hash2pos(file, fname->hash, fname->minor_hash); call_filldir() 541 * If there are any leftover names on the hash collision ext4_dx_readdir() 576 info->curr_hash = fname->hash; ext4_dx_readdir() 585 info->curr_hash = fname->hash; ext4_dx_readdir()
|
/linux-4.1.27/security/selinux/ss/ |
H A D | hashtab.h | 2 * A hash table (hashtab) maintains associations between 5 * functions for hash computation and key comparison are 22 struct hashtab_node **htable; /* hash table */ 23 u32 size; /* number of slots in hash table */ 24 u32 nel; /* number of elements in hash table */ 26 /* hash function */ 37 * Creates a new hash table with the specified characteristics. 40 * the new hash table otherwise. 47 * Inserts the specified (key, datum) pair into the specified hash table. 57 * Searches for the entry with the specified key in the hash table. 65 * Destroys the specified hash table. 71 * for each entry in the specified hash table. 74 * is dependent upon the internal structure of the hash table. 77 * iterating through the hash table and will propagate the error 84 /* Fill info with some hash table statistics */
|
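hashtab.h above describes a plain chained hash table with caller-supplied hash and key-compare callbacks. A userspace miniature with the same fields (htable, size, nel); the helper names are illustrative, not the SELinux API.

#include <stdlib.h>
#include <stdint.h>

struct ht_node {
        void *key;
        void *datum;
        struct ht_node *next;
};

struct ht {
        struct ht_node **htable;            /* hash table */
        uint32_t size;                      /* number of slots */
        uint32_t nel;                       /* number of elements */
        uint32_t (*hash)(const void *key);  /* hash function */
        int (*keycmp)(const void *a, const void *b);
};

/* Insert (key, datum); fails if the key is already present, mirroring
 * the "inserts the specified (key, datum) pair" contract above. */
static int ht_insert(struct ht *t, void *key, void *datum)
{
        uint32_t slot = t->hash(key) % t->size;
        struct ht_node *n;

        for (n = t->htable[slot]; n; n = n->next)
                if (!t->keycmp(n->key, key))
                        return -1;          /* duplicate key */

        n = malloc(sizeof(*n));
        if (!n)
                return -1;
        n->key = key;
        n->datum = datum;
        n->next = t->htable[slot];          /* push onto the chain */
        t->htable[slot] = n;
        t->nel++;
        return 0;
}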
H A D | symtab.h | 5 * using the hash table type (hashtab). 15 struct hashtab *table; /* hash table (keyed on a string) */
|
H A D | avtab.c | 17 * Tuned number of hash slots for avtab to reduce memory usage 40 u32 hash = 0; avtab_hash() local 47 hash ^= v; \ avtab_hash() 48 hash = (hash << r2) | (hash >> (32 - r2)); \ avtab_hash() 49 hash = hash * m + n; \ avtab_hash() 58 hash ^= hash >> 16; avtab_hash() 59 hash *= 0x85ebca6b; avtab_hash() 60 hash ^= hash >> 13; avtab_hash() 61 hash *= 0xc2b2ae35; avtab_hash() 62 hash ^= hash >> 16; avtab_hash() 64 return hash & mask; avtab_hash() 320 printk(KERN_DEBUG "SELinux: %d avtab hash slots, %d rules.\n", avtab_alloc()
|
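The avalanche at the end of avtab_hash() above (shift/multiply constants 16, 0x85ebca6b, 13, 0xc2b2ae35, 16) is exactly MurmurHash3's 32-bit finalizer. Standalone:

#include <stdint.h>

/* MurmurHash3 fmix32, the final avalanche avtab_hash() uses above. */
static uint32_t fmix32(uint32_t hash)
{
        hash ^= hash >> 16;
        hash *= 0x85ebca6b;
        hash ^= hash >> 13;
        hash *= 0xc2b2ae35;
        hash ^= hash >> 16;
        return hash;
}

/* avtab then masks the result down to a slot: fmix32(hash) & mask,
 * where mask = nslot - 1 (see avtab.h above). */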
H A D | avtab.h | 2 * An access vector table (avtab) is a hash table 21 * Tuned number of hash slots for avtab to reduce memory usage 58 u32 nslot; /* number of hash slots */ 59 u32 mask; /* mask to compute hash func */
|
/linux-4.1.27/include/linux/ |
H A D | hash.h | 39 * multiplicative hash. 53 u64 hash = val; hash_64() local 56 hash = hash * GOLDEN_RATIO_64; hash_64() 59 u64 n = hash; hash_64() 61 hash -= n; hash_64() 63 hash -= n; hash_64() 65 hash += n; hash_64() 67 hash -= n; hash_64() 69 hash += n; hash_64() 71 hash += n; hash_64() 75 return hash >> (64 - bits); hash_64() 81 u32 hash = val * GOLDEN_RATIO_PRIME_32; hash_32() local 84 return hash >> (32 - bits); hash_32()
|
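hash_32() above is Knuth-style multiplicative hashing: multiply by a golden-ratio constant and keep the top bits of the product, which are the best mixed. A sketch; the multiplier 0x9e370001 (GOLDEN_RATIO_PRIME_32) is an assumption for this kernel era.

#include <stdint.h>

#define GOLDEN_RATIO_PRIME_32 0x9e370001u  /* assumed value */

static inline uint32_t hash_32(uint32_t val, unsigned int bits)
{
        uint32_t hash = val * GOLDEN_RATIO_PRIME_32;

        /* High bits are more random, so use them. */
        return hash >> (32 - bits);
}

/* e.g. hash_32(x, 10) yields a 10-bit bucket index for a 1024-slot table. */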
H A D | rhashtable.h | 39 * Hash (27 bits): Full hash (unmasked) of first element added to bucket 56 * struct bucket_table - Table of hash buckets 57 * @size: Number of hash buckets 59 * @hash_rnd: Random seed to fold into hash 65 * @buckets: size * hash buckets 112 * @obj_hashfn: Function to hash object 180 static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) rht_marker() argument 182 return NULLS_MARKER(ht->p.nulls_base + hash); rht_marker() 185 #define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \ 186 ((ptr) = (typeof(ptr)) rht_marker(ht, hash)) 205 unsigned int hash) rht_bucket_index() 207 return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); rht_bucket_index() 214 unsigned int hash; rht_key_hashfn() local 218 hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); rht_key_hashfn() 223 hash = params.hashfn(key, key_len, tbl->hash_rnd); rht_key_hashfn() 225 hash = jhash(key, key_len, tbl->hash_rnd); rht_key_hashfn() 227 hash = jhash2(key, key_len / sizeof(u32), rht_key_hashfn() 233 hash = params.hashfn(key, key_len, tbl->hash_rnd); rht_key_hashfn() 235 hash = jhash(key, key_len, tbl->hash_rnd); rht_key_hashfn() 238 return rht_bucket_index(tbl, hash); rht_key_hashfn() 256 * @ht: hash table 269 * @ht: hash table 282 * @ht: hash table 294 * @ht: hash table 304 /* The bucket lock is selected based on the hash and protects mutations 305 * on a group of hash buckets. 318 unsigned int hash) rht_bucket_lock() 320 return &tbl->locks[hash & tbl->locks_mask]; rht_bucket_lock() 325 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash); 333 u32 hash) lockdep_rht_bucket_is_held() 364 #define rht_dereference_bucket(p, tbl, hash) \ 365 rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash)) 367 #define rht_dereference_bucket_rcu(p, tbl, hash) \ 368 rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash)) 374 * rht_for_each_continue - continue iterating over hash chain 378 * @hash: the hash value / bucket index 380 #define rht_for_each_continue(pos, head, tbl, hash) \ 381 for (pos = rht_dereference_bucket(head, tbl, hash); \ 383 pos = rht_dereference_bucket((pos)->next, tbl, hash)) 386 * rht_for_each - iterate over hash chain 389 * @hash: the hash value / bucket index 391 #define rht_for_each(pos, tbl, hash) \ 392 rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash) 395 * rht_for_each_entry_continue - continue iterating over hash chain 400 * @hash: the hash value / bucket index 403 #define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \ 404 for (pos = rht_dereference_bucket(head, tbl, hash); \ 406 pos = rht_dereference_bucket((pos)->next, tbl, hash)) 409 * rht_for_each_entry - iterate over hash chain of given type 413 * @hash: the hash value / bucket index 416 #define rht_for_each_entry(tpos, pos, tbl, hash, member) \ 417 rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \ 418 tbl, hash, member) 421 * rht_for_each_entry_safe - safely iterate over hash chain of given type 426 * @hash: the hash value / bucket index 429 * This hash chain list-traversal primitive allows for the looped code to 432 #define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \ 433 for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \ 435 rht_dereference_bucket(pos->next, tbl, hash) : NULL; \ 439 rht_dereference_bucket(pos->next, tbl, hash) : NULL) 442 * rht_for_each_rcu_continue - continue iterating over rcu hash chain 446 * @hash: the hash value / bucket index 448 * This hash chain list-traversal primitive may safely run concurrently with 452 #define rht_for_each_rcu_continue(pos, head, tbl, hash) \ 454 pos = rht_dereference_bucket_rcu(head, tbl, hash); \ 459 * rht_for_each_rcu - iterate over rcu hash chain 462 * @hash: the hash value / bucket index 464 * This hash chain list-traversal primitive may safely run concurrently with 468 #define rht_for_each_rcu(pos, tbl, hash) \ 469 rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash) 472 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain 477 * @hash: the hash value / bucket index 480 * This hash chain list-traversal primitive may safely run concurrently with 484 #define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \ 486 pos = rht_dereference_bucket_rcu(head, tbl, hash); \ 488 pos = rht_dereference_bucket_rcu(pos->next, tbl, hash)) 491 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type 495 * @hash: the hash value / bucket index 498 * This hash chain list-traversal primitive may safely run concurrently with 502 #define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \ 503 rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\ 504 tbl, hash, member) 516 * rhashtable_lookup_fast - search hash table, inlined version 517 * @ht: hash table 519 * @params: hash table parameters 521 * Computes the hash value for the key and traverses the bucket chain looking 536 unsigned int hash; rhashtable_lookup_fast() local 542 hash = rht_key_hashfn(ht, tbl, key, params); rht_for_each_rcu() 543 rht_for_each_rcu(he, tbl, hash) { rht_for_each_rcu() 576 unsigned int hash; __rhashtable_insert_fast() local 588 hash = rht_head_hashfn(ht, tbl, obj, params); __rhashtable_insert_fast() 589 lock = rht_bucket_lock(tbl, hash); __rhashtable_insert_fast() 592 if (tbl->rehash <= hash) __rhashtable_insert_fast() 624 rht_for_each(head, tbl, hash) { rht_for_each() 636 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); 640 rcu_assign_pointer(tbl->buckets[hash], obj); 654 * rhashtable_insert_fast - insert object into hash table 655 * @ht: hash table 656 * @obj: pointer to hash head inside object 657 * @params: hash table parameters 677 * rhashtable_lookup_insert_fast - lookup and insert object into hash table 678 * @ht: hash table 679 * @obj: pointer to hash head inside object 680 * @params: hash table parameters 688 * This lookup function may only be used for fixed key hash table (key_len 710 * rhashtable_lookup_insert_key - search and insert object to hash table 712 * @ht: hash table 714 * @obj: pointer to hash head inside object 715 * @params: hash table parameters 748 unsigned int hash; __rhashtable_remove_fast() local 751 hash = rht_head_hashfn(ht, tbl, obj, params); __rhashtable_remove_fast() 752 lock = rht_bucket_lock(tbl, hash); __rhashtable_remove_fast() 756 pprev = &tbl->buckets[hash]; rht_for_each() 757 rht_for_each(he, tbl, hash) { rht_for_each() 774 * rhashtable_remove_fast - remove object from hash table 775 * @ht: hash table 776 * @obj: pointer to hash head inside object 777 * @params: hash table parameters 779 * Since the hash chain is single linked, the removal operation needs to 781 * considerably slow if the hash table is not correctly sized. 204 rht_bucket_index(const struct bucket_table *tbl, unsigned int hash) rht_bucket_index() argument 317 rht_bucket_lock(const struct bucket_table *tbl, unsigned int hash) rht_bucket_lock() argument 332 lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) lockdep_rht_bucket_is_held() argument
|
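A hedged usage sketch of the rhashtable_*_fast() API shown above: describe the key and link offsets once in rhashtable_params, then pass the params struct by value to insert and lookup. Field and function names follow the 4.1-era API as best recalled and should be checked against this tree's rhashtable.h.

#include <linux/rhashtable.h>

struct test_obj {
        u32 key;
        struct rhash_head node;  /* links the object into the table */
};

static const struct rhashtable_params test_params = {
        .key_len     = sizeof(u32),
        .key_offset  = offsetof(struct test_obj, key),
        .head_offset = offsetof(struct test_obj, node),
};

static int example(struct rhashtable *ht, struct test_obj *obj, u32 key)
{
        struct test_obj *found;
        int err;

        err = rhashtable_insert_fast(ht, &obj->node, test_params);
        if (err)
                return err;

        rcu_read_lock();  /* lookups run under RCU, per the comments above */
        found = rhashtable_lookup_fast(ht, &key, test_params);
        rcu_read_unlock();

        return found ? 0 : -ENOENT;
}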
H A D | jhash.h | 4 /* jhash.h: Jenkins hash support. 8 * http://burtleburtle.net/bob/hash/ 14 * These are functions for producing 32-bit hashes for hash table lookup. 16 * are externally useful functions. Routines to test the hash are included 22 * I've modified Bob's hash to be useful in the Linux kernel, and 29 /* Best hash sizes are of power of two */ 31 /* Mask the hash value, i.e. (value & jhash_mask(n)) instead of (value % n) */ 60 /* jhash - hash an arbitrary key 63 * @initval: the previous hash, or an arbitrary value 68 * Returns the hash value of the key. The result depends on endianness. 110 /* jhash2 - hash an array of u32's 113 * @initval: the previous hash, or an arbitrary value 115 * Returns the hash value of the key. 148 /* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */ __jhash_nwords()
|
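Bucket selection with jhash, as the header above recommends: hash the key with a random initval and, with a power-of-two table size, mask instead of taking a modulo. jhash() is declared external here; the kernel provides it.

#include <stdint.h>

uint32_t jhash(const void *key, uint32_t length, uint32_t initval);

static uint32_t bucket_of(const void *key, uint32_t len,
                          uint32_t seed, uint32_t table_size)
{
        /* table_size must be a power of two for the mask to be valid */
        return jhash(key, len, seed) & (table_size - 1);
}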
H A D | cryptohash.h | 16 void md5_transform(__u32 *hash, __u32 const *in);
|
H A D | if_tunnel.h | 10 * Locking : hash tables are protected by RCU and RTNL
|
H A D | dm-region-hash.h | 5 * Device-Mapper dirty region hash interface. 16 * Region hash 32 * Region hash create/destroy. 71 /* Flush the region hash and dirty log. */
|
H A D | rculist_bl.h | 27 * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization 28 * @n: the element to delete from the hash list. 35 * that may still be used for walking the hash list and we can only 55 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization 56 * @n: the element to delete from the hash list. 63 * pointers that may still be used for walking the hash list. 81 * @n: the element to add to the hash list.
|
H A D | rculist_nulls.h | 13 * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization 14 * @n: the element to delete from the hash list. 21 * that may still be used for walking the hash list and we can only 47 * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization 48 * @n: the element to delete from the hash list. 55 * pointers that may still be used for walking the hash list. 73 * @n: the element to add to the hash list.
|
H A D | ethtool.h | 63 ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */ 64 ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */ 67 * Add your fresh new hash function bits above and remember to update 89 * ethtool_rxfh_indir_default - get default value for RX flow hash indirection 90 * @index: Index in RX flow hash indirection table 93 * This function provides the default policy for RX flow hash indirection. 177 * @get_rxfh_key_size: Get the size of the RX flow hash key. 179 * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. 181 * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key 182 * and/or hash function. 184 * @set_rxfh: Set the contents of the RX flow hash indirection table, hash 185 * key, and/or hash function. Arguments which are set to %NULL or zero
|
H A D | dcache.h | 28 /* The hash is always the low bits of hash_len */ 30 #define HASH_LEN_DECLARE u32 hash; u32 len; 33 #define HASH_LEN_DECLARE u32 len; u32 hash; 39 * saves "metadata" about the string (ie length and the hash). 41 * hash comes first so it snuggles against d_parent in the 57 #define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash)) 68 /* Name hashing routines. Initial hash value */ 69 /* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ 72 /* partial hash update function. Assume roughly 4 bits per character */ 83 static inline unsigned long end_name_hash(unsigned long hash) end_name_hash() argument 85 return (unsigned int) hash; end_name_hash() 88 /* Compute the hash for a name string. */ 112 struct hlist_bl_node d_hash; /* lookup hash list */ 272 * This adds the entry to the hash queues. 277 * d_add - add dentry to hash queues 281 * This adds the entry to the hash queues and initializes @inode. 292 * d_add_unique - add dentry to hash queues without aliasing 296 * This adds the entry to the hash queues and initializes @inode.
|
/linux-4.1.27/fs/hpfs/ |
H A D | dentry.c | 17 unsigned long hash; hpfs_hash_dentry() local 29 hash = init_name_hash(); hpfs_hash_dentry() 31 hash = partial_name_hash(hpfs_upcase(hpfs_sb(dentry->d_sb)->sb_cp_table,qstr->name[i]), hash); hpfs_hash_dentry() 32 qstr->hash = end_name_hash(hash); hpfs_hash_dentry()
|
/linux-4.1.27/include/crypto/ |
H A D | md5.h | 12 u32 hash[MD5_HASH_WORDS]; member in struct:md5_state
|
H A D | sha.h | 91 unsigned int len, u8 *hash); 97 unsigned int len, u8 *hash); 103 unsigned int len, u8 *hash);
|
/linux-4.1.27/drivers/staging/lustre/include/linux/libcfs/ |
H A D | libcfs_crypto.h | 32 char *cht_name; /**< hash algorithm name, equal to 36 unsigned int cht_size; /**< hash digest size */ 64 /** Return pointer to type of hash for valid hash algorithm identifier */ 78 /** Return hash name for valid hash algorithm identifier or "unknown" */ cfs_crypto_hash_name() 100 /** Return hash identifier for valid hash algorithm name or 0xFF */ cfs_crypto_hash_alg() 111 /** Calculate hash digest for buffer. 112 * @param alg id of hash algorithm 118 * @param hash [out] pointer to hash, if it is NULL, hash_len is 120 * @param hash_len [in,out] size of hash buffer 124 * @retval -ENOSPC if pointer to hash is NULL, or hash_len less than 132 unsigned char *hash, unsigned int *hash_len); 134 /* cfs crypto hash descriptor */ 137 /** Allocate and initialize descriptor for hash algorithm. 142 * @returns pointer to descriptor of hash instance 150 * @param desc hash descriptor 162 * @param desc hash descriptor 171 /** Finalize hash calculation, copy hash digest to buffer, destroy hash 173 * @param desc hash descriptor 174 * @param hash buffer pointer to store hash digest 175 * @param hash_len pointer to hash buffer size, if NULL 176 * destroy hash descriptor 178 * @retval -ENOSPC if hash is NULL, or *hash_len less than 184 unsigned char *hash, unsigned int *hash_len); 186 * Register crypto hash algorithms 195 /** Return hash speed in Mbytes per second for valid hash algorithm
|
H A D | libcfs_hash.h | 66 #include <linux/hash.h> 70 /** record hash depth and output to console when it's too deep, 90 * - array of hash-head starting from hsb_head[0], hash-head can be one of 96 * - some extra bytes (caller can require it while creating hash) 104 long hsb_head[0]; /**< hash-head array */ 123 * common hash attributes. 135 /** no bucket lock, use one spinlock to protect the whole hash */ 143 /** hash-table doesn't have refcount on item */ 151 /** Enable dynamic hash resizing */ 153 /** can shrink hash-size */ 155 /** assert hash is empty on exit */ 161 * change on hash table is non-blocking 173 * cfs_hash is a hash-table implementation for general purpose, it can support: 175 * hash-table with & without refcount 185 * support long name hash 201 * . if rehash is in progress while we try to iterate the hash table, 203 * should expect iteration of whole hash-table to be non-blocking. 213 * the hash-table has CFS_HASH_NO_BKTLOCK */ 215 /** hash operations */ 217 /** hash lock operations */ 219 /** hash list operations */ 221 /** hash buckets-table */ 223 /** total number of items on this hash-table */ 225 /** hash flags, see cfs_hash_tag for detail */ 231 /** hash-table is dying */ 233 /** current hash bits */ 235 /** min hash bits */ 237 /** max hash bits */ 253 /** refcount on this hash table */ 276 /** lock the hash table */ 278 /** unlock the hash table */ 280 /** lock the hash bucket */ 282 /** unlock the hash bucket */ 287 /** return hlist_head of hash-head of @bd */ 289 /** return hash-head size */ 291 /** add @hnode to hash-head of @bd */ 294 /** remove @hnode from hash-head of @bd */ 343 /* caller will serialize all operations for this hash-table */ cfs_hash_with_no_lock() 350 /* no bucket lock, one single lock to protect the hash-table */ cfs_hash_with_no_bktlock() 357 /* rwlock to protect hash bucket */ cfs_hash_with_rw_bktlock() 364 /* spinlock to protect hash bucket */ cfs_hash_with_spin_bktlock() 377 /* hash-table doesn't keep refcount on item, cfs_hash_with_no_itemref() 378 * item can't be removed from hash unless it's cfs_hash_with_no_itemref() 547 * they are normally for hash-table without rehash 644 * they are safe for hash-table with rehash 715 * hash depth assuming a perfectly uniform hash function. 792 * Generic djb2 hash algorithm for character arrays. 797 unsigned i, hash = 5381; 802 hash = hash * 33 + ((char *)key)[i]; 804 return (hash & mask); 808 * Generic u32 hash algorithm. 817 * Generic u64 hash algorithm.
|
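The "generic djb2" helper quoted above in libcfs_hash.h, as standalone C; the snippet already contains the whole computation (seed 5381, multiply by 33, mask to the table size).

#include <stdint.h>

/* djb2 over a byte array, masked down to a power-of-two table. */
static unsigned int djb2_hash(const void *key, unsigned int size,
                              unsigned int mask)
{
        unsigned int i, hash = 5381;

        for (i = 0; i < size; i++)
                hash = hash * 33 + ((const char *)key)[i];

        return hash & mask;
}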
/linux-4.1.27/crypto/ |
H A D | md4.c | 23 #include <crypto/internal/hash.h> 37 u32 hash[MD4_HASH_WORDS]; member in struct:md4_ctx 84 static void md4_transform(u32 *hash, u32 const *in) md4_transform() argument 88 a = hash[0]; md4_transform() 89 b = hash[1]; md4_transform() 90 c = hash[2]; md4_transform() 91 d = hash[3]; md4_transform() 144 hash[0] += a; md4_transform() 145 hash[1] += b; md4_transform() 146 hash[2] += c; md4_transform() 147 hash[3] += d; md4_transform() 153 md4_transform(ctx->hash, ctx->block); md4_transform_helper() 160 mctx->hash[0] = 0x67452301; md4_init() 161 mctx->hash[1] = 0xefcdab89; md4_init() 162 mctx->hash[2] = 0x98badcfe; md4_init() 163 mctx->hash[3] = 0x10325476; md4_init() 221 md4_transform(mctx->hash, mctx->block); md4_final() 222 cpu_to_le32_array(mctx->hash, ARRAY_SIZE(mctx->hash)); md4_final() 223 memcpy(out, mctx->hash, sizeof(mctx->hash)); md4_final()
|
H A D | md5.c | 18 #include <crypto/internal/hash.h> 47 md5_transform(ctx->hash, ctx->block); md5_transform_helper() 54 mctx->hash[0] = 0x67452301; md5_init() 55 mctx->hash[1] = 0xefcdab89; md5_init() 56 mctx->hash[2] = 0x98badcfe; md5_init() 57 mctx->hash[3] = 0x10325476; md5_init() 115 md5_transform(mctx->hash, mctx->block); md5_final() 116 cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32)); md5_final() 117 memcpy(out, mctx->hash, sizeof(mctx->hash)); md5_final()
|
H A D | hmac.c | 19 #include <crypto/internal/hash.h> 29 struct crypto_shash *hash; member in struct:hmac_ctx 54 struct crypto_shash *hash = ctx->hash; hmac_setkey() local 55 SHASH_DESC_ON_STACK(shash, hash); hmac_setkey() 58 shash->tfm = hash; hmac_setkey() 103 desc->tfm = ctx->hash; hmac_import() 159 struct crypto_shash *hash; hmac_init_tfm() local 164 hash = crypto_spawn_shash(spawn); hmac_init_tfm() 165 if (IS_ERR(hash)) hmac_init_tfm() 166 return PTR_ERR(hash); hmac_init_tfm() 169 crypto_shash_descsize(hash); hmac_init_tfm() 171 ctx->hash = hash; hmac_init_tfm() 178 crypto_free_shash(ctx->hash); hmac_exit_tfm() 270 MODULE_DESCRIPTION("HMAC hash algorithm");
|
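The SHASH_DESC_ON_STACK pattern visible in hmac_setkey() above is the stock way to run a synchronous hash once. A hedged kernel-context sketch; "sha256" is an arbitrary algorithm choice, and error handling is abbreviated.

#include <crypto/hash.h>

/* One-shot digest with the shash API. */
static int digest_buffer(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;  /* field present in this kernel era */
                err = crypto_shash_digest(desc, data, len, out);
        }

        crypto_free_shash(tfm);
        return err;
}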
H A D | algif_hash.c | 2 * algif_hash: User-space interface for hash algorithms 4 * This file provides the user-space API for hash algorithms. 15 #include <crypto/hash.h> 38 struct crypto_ahash *hash; member in struct:algif_hash_tfm 348 struct crypto_ahash *hash; hash_bind() local 354 hash = crypto_alloc_ahash(name, type, mask); hash_bind() 355 if (IS_ERR(hash)) { hash_bind() 357 return ERR_CAST(hash); hash_bind() 360 tfm->hash = hash; hash_bind() 369 crypto_free_ahash(tfm->hash); hash_release() 378 err = crypto_ahash_setkey(tfm->hash, key, keylen); hash_setkey() 400 struct crypto_ahash *hash = tfm->hash; hash_accept_parent_nokey() local 401 unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); hash_accept_parent_nokey() 402 unsigned ds = crypto_ahash_digestsize(hash); hash_accept_parent_nokey() 422 ahash_request_set_tfm(&ctx->req, hash); hash_accept_parent_nokey() 435 if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash)) hash_accept_parent() 449 .name = "hash",
|
H A D | ahash.c | 4 * This is the asynchronous version of hash.c with notification of 16 #include <crypto/internal/hash.h> 37 static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash) crypto_ahash_alg() argument 39 return container_of(crypto_hash_alg_common(hash), struct ahash_alg, crypto_ahash_alg() 249 * is necessary. See include/crypto/hash.h and include/linux/crypto.h ahash_save_req() 451 struct crypto_ahash *hash = __crypto_ahash_cast(tfm); crypto_ahash_init_tfm() local 452 struct ahash_alg *alg = crypto_ahash_alg(hash); crypto_ahash_init_tfm() 454 hash->setkey = ahash_nosetkey; crypto_ahash_init_tfm() 455 hash->has_setkey = false; crypto_ahash_init_tfm() 456 hash->export = ahash_no_export; crypto_ahash_init_tfm() 457 hash->import = ahash_no_import; crypto_ahash_init_tfm() 462 hash->init = alg->init; crypto_ahash_init_tfm() 463 hash->update = alg->update; crypto_ahash_init_tfm() 464 hash->final = alg->final; crypto_ahash_init_tfm() 465 hash->finup = alg->finup ?: ahash_def_finup; crypto_ahash_init_tfm() 466 hash->digest = alg->digest; crypto_ahash_init_tfm() 469 hash->setkey = alg->setkey; crypto_ahash_init_tfm() 470 hash->has_setkey = true; crypto_ahash_init_tfm() 473 hash->export = alg->export; crypto_ahash_init_tfm() 475 hash->import = alg->import; crypto_ahash_init_tfm() 620 MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
|
H A D | crc32.c | 33 #include <crypto/internal/hash.h> 63 static int crc32_setkey(struct crypto_shash *hash, const u8 *key, crc32_setkey() argument 66 u32 *mctx = crypto_shash_ctx(hash); crc32_setkey() 69 crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); crc32_setkey()
|
H A D | sha512_generic.c | 13 #include <crypto/internal/hash.h> 150 static int sha512_final(struct shash_desc *desc, u8 *hash) sha512_final() argument 153 return sha512_base_finish(desc, hash); sha512_final() 157 unsigned int len, u8 *hash) crypto_sha512_finup() 160 return sha512_final(desc, hash); crypto_sha512_finup() 156 crypto_sha512_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash) crypto_sha512_finup() argument
|
H A D | testmgr.c | 23 #include <crypto/hash.h> 128 struct hash_test_suite hash; member in union:alg_test_desc::__anon3263 219 printk(KERN_ERR "alg: hash: Failed to allocate request for " __test_hash() 247 pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", __test_hash() 255 printk(KERN_ERR "alg: hash: setkey failed on " __test_hash() 266 pr_err("alg: hash: digest failed on test %d " __test_hash() 273 pr_err("alt: hash: init failed on test %d " __test_hash() 279 pr_err("alt: hash: update failed on test %d " __test_hash() 285 pr_err("alt: hash: final failed on test %d " __test_hash() 293 printk(KERN_ERR "alg: hash: Test %d failed for %s\n", __test_hash() 331 pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", __test_hash() 341 printk(KERN_ERR "alg: hash: setkey " __test_hash() 362 printk(KERN_ERR "alg: hash: digest failed " __test_hash() 370 printk(KERN_ERR "alg: hash: Chunking test %d " __test_hash() 1641 printk(KERN_ERR "alg: hash: Failed to load transform for %s: " alg_test_hash() 1646 err = test_hash(tfm, desc->suite.hash.vecs, alg_test_hash() 1647 desc->suite.hash.count, true); alg_test_hash() 1649 err = test_hash(tfm, desc->suite.hash.vecs, alg_test_hash() 1650 desc->suite.hash.count, false); alg_test_hash() 2303 .hash = { 2312 .hash = { 2325 .hash = { 2335 .hash = { 3006 .hash = { 3015 .hash = { 3024 .hash = { 3033 .hash = { 3042 .hash = { 3052 .hash = { 3062 .hash = { 3072 .hash = { 3082 .hash = { 3092 .hash = { 3224 .hash = { 3233 .hash = { 3242 .hash = { 3345 .hash = { 3354 .hash = { 3363 .hash = { 3372 .hash = { 3393 .hash = { 3403 .hash = { 3413 .hash = { 3423 .hash = { 3433 .hash = { 3442 .hash = { 3451 .hash = { 3460 .hash = { 3469 .hash = { 3478 .hash = { 3487 .hash = { 3496 .hash = { 3505 .hash = {
|
H A D | tgr192.c | 24 #include <crypto/internal/hash.h> 39 u8 hash[64]; member in struct:tgr192_ctx 520 tgr192_transform(tctx, tctx->hash); tgr192_update() 529 tctx->hash[tctx->count++] = *inbuf++; tgr192_update() 546 tctx->hash[tctx->count++] = *inbuf++; tgr192_update() 582 tctx->hash[tctx->count++] = 0x01; /* pad */ tgr192_final() 584 tctx->hash[tctx->count++] = 0; /* pad */ tgr192_final() 587 tctx->hash[tctx->count++] = 0x01; /* pad character */ tgr192_final() 589 tctx->hash[tctx->count++] = 0; tgr192_final() 592 memset(tctx->hash, 0, 56); /* fill next block with zeroes */ tgr192_final() 595 le32p = (__le32 *)&tctx->hash[56]; tgr192_final() 599 tgr192_transform(tctx, tctx->hash); tgr192_final() 601 be64p = (__be64 *)tctx->hash; tgr192_final()
|
H A D | authenc.c | 14 #include <crypto/internal/hash.h> 266 u8 *hash = areq_ctx->tail; crypto_authenc_ahash_fb() local 269 hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), crypto_authenc_ahash_fb() 278 ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen); crypto_authenc_ahash_fb() 286 ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, crypto_authenc_ahash_fb() 295 return hash; crypto_authenc_ahash_fb() 305 u8 *hash = areq_ctx->tail; crypto_authenc_ahash() local 308 hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth), crypto_authenc_ahash() 312 ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, crypto_authenc_ahash() 321 return hash; crypto_authenc_ahash() 338 u8 *hash; crypto_authenc_genicv() local 366 hash = authenc_ahash_fn(req, flags); crypto_authenc_genicv() 367 if (IS_ERR(hash)) crypto_authenc_genicv() 368 return PTR_ERR(hash); crypto_authenc_genicv() 370 scatterwalk_map_and_copy(hash, dst, cryptlen, crypto_authenc_genicv()
|
/linux-4.1.27/scripts/genksyms/ |
H A D | Makefile | 12 $(obj)/lex.lex.o: $(obj)/keywords.hash.c $(obj)/parse.tab.h 14 clean-files := keywords.hash.c lex.lex.c parse.tab.c parse.tab.h
|
/linux-4.1.27/arch/powerpc/crypto/ |
H A D | md5-glue.c | 15 #include <crypto/internal/hash.h> 40 sctx->hash[0] = 0x67452301; ppc_md5_init() 41 sctx->hash[1] = 0xefcdab89; ppc_md5_init() 42 sctx->hash[2] = 0x98badcfe; ppc_md5_init() 43 sctx->hash[3] = 0x10325476; ppc_md5_init() 66 ppc_md5_transform(sctx->hash, (const u8 *)sctx->block, 1); ppc_md5_update() 72 ppc_md5_transform(sctx->hash, src, len >> 6); ppc_md5_update() 95 ppc_md5_transform(sctx->hash, src, 1); ppc_md5_final() 102 ppc_md5_transform(sctx->hash, src, 1); ppc_md5_final() 104 dst[0] = cpu_to_le32(sctx->hash[0]); ppc_md5_final() 105 dst[1] = cpu_to_le32(sctx->hash[1]); ppc_md5_final() 106 dst[2] = cpu_to_le32(sctx->hash[2]); ppc_md5_final() 107 dst[3] = cpu_to_le32(sctx->hash[3]); ppc_md5_final()
|
/linux-4.1.27/net/ceph/ |
H A D | ceph_hash.c | 6 * Robert Jenkins' hash function. 7 * http://burtleburtle.net/bob/hash/evahash.html 82 * linux dcache hash 86 unsigned long hash = 0; ceph_str_hash_linux() local 91 hash = (hash + (c << 4) + (c >> 4)) * 11; ceph_str_hash_linux() 93 return hash; ceph_str_hash_linux()
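ceph_str_hash_linux() is the classic dcache string hash: each byte is folded in with two shifts and a multiply by 11. A self-contained sketch of the recurrence from the excerpt (the final reduction to a bucket index is an added assumption):

    #include <stdio.h>

    /* dcache-style string hash, as in the excerpt:
     *   hash = (hash + (c << 4) + (c >> 4)) * 11
     */
    static unsigned long str_hash_linux(const char *str, unsigned int len)
    {
        unsigned long hash = 0;

        while (len--) {
            unsigned char c = (unsigned char)*str++;

            hash = (hash + (c << 4) + (c >> 4)) * 11;
        }
        return hash;
    }

    int main(void)
    {
        unsigned long h = str_hash_linux("hash.c", 6);

        /* reducing with a power-of-two mask is an assumption here */
        printf("hash=%lu bucket=%lu\n", h, h & 255);
        return 0;
    }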
|
H A D | Makefile | 8 osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
|
/linux-4.1.27/lib/ |
H A D | md5.c | 13 void md5_transform(__u32 *hash, __u32 const *in) md5_transform() argument 17 a = hash[0]; md5_transform() 18 b = hash[1]; md5_transform() 19 c = hash[2]; md5_transform() 20 d = hash[3]; md5_transform() 90 hash[0] += a; md5_transform() 91 hash[1] += b; md5_transform() 92 hash[2] += c; md5_transform() 93 hash[3] += d; md5_transform()
|
H A D | oid_registry.c | 34 unsigned i, j, k, hash; look_up_OID() local 38 hash = datasize - 1; look_up_OID() 41 hash += octets[i] * 33; look_up_OID() 42 hash = (hash >> 24) ^ (hash >> 16) ^ (hash >> 8) ^ hash; look_up_OID() 43 hash &= 0xff; look_up_OID() 46 * of hash value then ascending order of size and then in ascending look_up_OID() 54 xhash = oid_search_table[j].hash; look_up_OID() 55 if (xhash > hash) { look_up_OID() 59 if (xhash < hash) { look_up_OID()
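look_up_OID() seeds the hash with the encoded length, accumulates octets[i] * 33, then XOR-folds the 32-bit accumulator down to a single byte, which serves as the primary sort key of the binary-searched table. A sketch of just that derivation:

    #include <stdio.h>

    /* fold a 33*c accumulator down to one byte, as in the excerpt */
    static unsigned char oid_hash(const unsigned char *octets, unsigned int datasize)
    {
        unsigned int i, hash = datasize - 1;

        for (i = 0; i < datasize; i++)
            hash += octets[i] * 33;
        hash = (hash >> 24) ^ (hash >> 16) ^ (hash >> 8) ^ hash;
        return hash & 0xff;
    }

    int main(void)
    {
        unsigned char oid[] = { 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d };

        printf("hash=0x%02x\n", oid_hash(oid, sizeof(oid)));
        return 0;
    }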
|
H A D | rhashtable.c | 51 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) lockdep_rht_bucket_is_held() argument 53 spinlock_t *lock = rht_bucket_lock(tbl, hash); lockdep_rht_bucket_is_held() 277 * rhashtable_expand - Expand hash table while allowing concurrent lookups 278 * @ht: the hash table to expand 280 * A secondary bucket array is allocated and the hash entries are migrated. 312 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups 313 * @ht: the hash table to shrink 315 * This function shrinks the hash table to fit, i.e., the smallest 383 unsigned int hash) rhashtable_check_elasticity() 388 rht_for_each(head, tbl, hash) rhashtable_check_elasticity() 440 unsigned int hash; rhashtable_insert_slow() local 444 hash = head_hashfn(ht, tbl, obj); rhashtable_insert_slow() 445 spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING); rhashtable_insert_slow() 456 if (rhashtable_check_elasticity(ht, tbl, hash) || rhashtable_insert_slow() 462 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); rhashtable_insert_slow() 466 rcu_assign_pointer(tbl->buckets[hash], obj); rhashtable_insert_slow() 471 spin_unlock(rht_bucket_lock(tbl, hash)); rhashtable_insert_slow() 482 * This function prepares a hash table walk. 490 * structure outside the hash table. 536 * rhashtable_walk_start - Start a hash table walk 539 * Start a hash table walk. Note that we take the RCU lock in all 633 * rhashtable_walk_stop - Finish a hash table walk 636 * Finish a hash table walk. 675 * rhashtable_init - initialize a new hash table 676 * @ht: hash table to be initialized 679 * Initializes a new hash table based on the provided configuration 708 * return [... hash ...]; 755 * size of the hash table, at a rate of (log N)/(log log N). rhashtable_init() 756 * The value of 16 is selected so that even if the hash rhashtable_init() 799 * rhashtable_free_and_destroy - free elements and destroy hash table 800 * @ht: the hash table to destroy 381 rhashtable_check_elasticity(struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) rhashtable_check_elasticity() argument
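rhashtable_expand() allocates a secondary bucket array and migrates every entry while lookups continue under RCU. Stripped of the RCU, per-bucket locking and elasticity machinery, the core of such a grow operation is a plain rehash into a table of twice the size; a single-threaded sketch (not the kernel API):

    #include <stdlib.h>
    #include <stdio.h>

    struct node {
        struct node *next;
        unsigned int key;
    };

    static unsigned int hashfn(unsigned int key, unsigned int nbuckets)
    {
        return (key * 2654435761u) & (nbuckets - 1); /* power-of-two table */
    }

    /* rehash every chained entry into a table twice as large */
    static struct node **expand(struct node **tbl, unsigned int *nbuckets)
    {
        unsigned int old = *nbuckets, new = old * 2, i;
        struct node **ntbl = calloc(new, sizeof(*ntbl));

        if (!ntbl)
            return tbl; /* keep the old table on allocation failure */

        for (i = 0; i < old; i++) {
            struct node *n = tbl[i];

            while (n) {
                struct node *next = n->next;
                unsigned int b = hashfn(n->key, new);

                n->next = ntbl[b];
                ntbl[b] = n;
                n = next;
            }
        }
        free(tbl);
        *nbuckets = new;
        return ntbl;
    }

    int main(void)
    {
        unsigned int nbuckets = 4;
        struct node **tbl = calloc(nbuckets, sizeof(*tbl));
        struct node a = { .next = NULL, .key = 42 };

        tbl[hashfn(a.key, nbuckets)] = &a;
        tbl = expand(tbl, &nbuckets); /* 'a' is rechained under the new index */
        printf("%u buckets\n", nbuckets);
        free(tbl);
        return 0;
    }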
|
H A D | digsig.c | 25 #include <crypto/hash.h> 186 * Normally hash of the content is used as a data for this function. 195 unsigned char hash[SHA1_DIGEST_SIZE]; digsig_verify() local 235 crypto_shash_final(desc, hash); digsig_verify() 241 hash, sizeof(hash)); digsig_verify()
|
/linux-4.1.27/arch/arm/crypto/ |
H A D | sha256_glue.h | 12 unsigned int len, u8 *hash);
|
H A D | sha1-ce-glue.c | 2 * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions 11 #include <crypto/internal/hash.h> 23 MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
|
/linux-4.1.27/fs/f2fs/ |
H A D | Makefile | 3 f2fs-y := dir.o file.o inode.o namei.o hash.o super.o inline.o
|
H A D | hash.c | 2 * fs/f2fs/hash.c 7 * Portions of this code from linux/fs/ext3/hash.c 75 __u32 hash; f2fs_dentry_hash() local 86 /* Initialize the default seed for the hash checksum functions */ f2fs_dentry_hash() 101 hash = buf[0]; f2fs_dentry_hash() 102 f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT); f2fs_dentry_hash()
|
/linux-4.1.27/security/apparmor/ |
H A D | crypto.c | 18 #include <crypto/hash.h> 45 profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL); aa_calc_profile_hash() 46 if (!profile->hash) aa_calc_profile_hash() 61 error = crypto_shash_final(&desc.shash, profile->hash); aa_calc_profile_hash() 68 kfree(profile->hash); aa_calc_profile_hash() 69 profile->hash = NULL; aa_calc_profile_hash()
|
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/ |
H A D | flowring.c | 70 struct brcmf_flowring_hash *hash; brcmf_flowring_lookup() local 92 hash = flow->hash; brcmf_flowring_lookup() 94 if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) && brcmf_flowring_lookup() 95 (hash[hash_idx].fifo == fifo) && brcmf_flowring_lookup() 96 (hash[hash_idx].ifidx == ifidx)) { brcmf_flowring_lookup() 103 return hash[hash_idx].flowid; brcmf_flowring_lookup() 113 struct brcmf_flowring_hash *hash; brcmf_flowring_create() local 135 hash = flow->hash; brcmf_flowring_create() 137 if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) && brcmf_flowring_create() 138 (is_zero_ether_addr(hash[hash_idx].mac))) { brcmf_flowring_create() 156 memcpy(hash[hash_idx].mac, mac, ETH_ALEN); brcmf_flowring_create() 157 hash[hash_idx].fifo = fifo; brcmf_flowring_create() 158 hash[hash_idx].ifidx = ifidx; brcmf_flowring_create() 159 hash[hash_idx].flowid = i; brcmf_flowring_create() 178 return flow->hash[ring->hash_id].fifo; brcmf_flowring_tid() 238 flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX; brcmf_flowring_delete() 239 eth_zero_addr(flow->hash[hash_idx].mac); brcmf_flowring_delete() 346 return flow->hash[hash_idx].ifidx; brcmf_flowring_ifidx_get() 362 for (i = 0; i < ARRAY_SIZE(flow->hash); i++) brcmf_flowring_attach() 363 flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX; brcmf_flowring_attach() 409 for (i = 0; i < ARRAY_SIZE(flow->hash); i++) { brcmf_flowring_configure_addr_mode() 410 if (flow->hash[i].ifidx == ifidx) { brcmf_flowring_configure_addr_mode() 411 flowid = flow->hash[i].flowid; brcmf_flowring_configure_addr_mode() 428 struct brcmf_flowring_hash *hash; brcmf_flowring_delete_peer() local 448 hash = flow->hash; brcmf_flowring_delete_peer() 450 if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) && brcmf_flowring_delete_peer() 451 (hash[i].ifidx == ifidx)) { brcmf_flowring_delete_peer() 452 flowid = flow->hash[i].flowid; brcmf_flowring_delete_peer()
|
/linux-4.1.27/arch/mips/cavium-octeon/crypto/ |
H A D | octeon-md5.c | 30 #include <crypto/internal/hash.h> 40 u64 *hash = (u64 *)ctx->hash; octeon_md5_store_hash() local 42 write_octeon_64bit_hash_dword(hash[0], 0); octeon_md5_store_hash() 43 write_octeon_64bit_hash_dword(hash[1], 1); octeon_md5_store_hash() 48 u64 *hash = (u64 *)ctx->hash; octeon_md5_read_hash() local 50 hash[0] = read_octeon_64bit_hash_dword(0); octeon_md5_read_hash() 51 hash[1] = read_octeon_64bit_hash_dword(1); octeon_md5_read_hash() 72 mctx->hash[0] = cpu_to_le32(0x67452301); octeon_md5_init() 73 mctx->hash[1] = cpu_to_le32(0xefcdab89); octeon_md5_init() 74 mctx->hash[2] = cpu_to_le32(0x98badcfe); octeon_md5_init() 75 mctx->hash[3] = cpu_to_le32(0x10325476); octeon_md5_init() 150 memcpy(out, mctx->hash, sizeof(mctx->hash)); octeon_md5_final()
|
H A D | octeon-sha256.c | 28 #include <crypto/internal/hash.h> 38 u64 *hash = (u64 *)sctx->state; octeon_sha256_store_hash() local 40 write_octeon_64bit_hash_dword(hash[0], 0); octeon_sha256_store_hash() 41 write_octeon_64bit_hash_dword(hash[1], 1); octeon_sha256_store_hash() 42 write_octeon_64bit_hash_dword(hash[2], 2); octeon_sha256_store_hash() 43 write_octeon_64bit_hash_dword(hash[3], 3); octeon_sha256_store_hash() 48 u64 *hash = (u64 *)sctx->state; octeon_sha256_read_hash() local 50 hash[0] = read_octeon_64bit_hash_dword(0); octeon_sha256_read_hash() 51 hash[1] = read_octeon_64bit_hash_dword(1); octeon_sha256_read_hash() 52 hash[2] = read_octeon_64bit_hash_dword(2); octeon_sha256_read_hash() 53 hash[3] = read_octeon_64bit_hash_dword(3); octeon_sha256_read_hash() 201 static int octeon_sha224_final(struct shash_desc *desc, u8 *hash) octeon_sha224_final() argument 207 memcpy(hash, D, SHA224_DIGEST_SIZE); octeon_sha224_final()
|
H A D | octeon-sha1.c | 27 #include <crypto/internal/hash.h> 37 u64 *hash = (u64 *)sctx->state; octeon_sha1_store_hash() local 43 write_octeon_64bit_hash_dword(hash[0], 0); octeon_sha1_store_hash() 44 write_octeon_64bit_hash_dword(hash[1], 1); octeon_sha1_store_hash() 51 u64 *hash = (u64 *)sctx->state; octeon_sha1_read_hash() local 57 hash[0] = read_octeon_64bit_hash_dword(0); octeon_sha1_read_hash() 58 hash[1] = read_octeon_64bit_hash_dword(1); octeon_sha1_read_hash()
|
/linux-4.1.27/security/integrity/ima/ |
H A D | ima_api.c | 78 * Calculate the hash of a template entry, add the template entry 102 } hash; ima_store_template() local 108 hash.hdr.algo = HASH_ALGO_SHA1; ima_store_template() 111 num_fields, &hash.hdr); ima_store_template() 118 memcpy(entry->digest, hash.hdr.digest, hash.hdr.length); ima_store_template() 127 * Violations are flagged in the measurement list with zero hash values. 184 * Calculate the file hash, if it doesn't already exist, 203 } hash; ima_collect_measurement() local 217 /* use default hash algorithm */ ima_collect_measurement() 218 hash.hdr.algo = ima_hash_algo; ima_collect_measurement() 221 ima_get_hash_algo(*xattr_value, *xattr_len, &hash.hdr); ima_collect_measurement() 223 result = ima_calc_file_hash(file, &hash.hdr); ima_collect_measurement() 225 int length = sizeof(hash.hdr) + hash.hdr.length; ima_collect_measurement() 230 memcpy(iint->ima_hash, &hash, length); ima_collect_measurement() 294 char hash[(iint->ima_hash->length * 2) + 1]; ima_audit_measurement() local 296 char algo_hash[sizeof(hash) + strlen(algo_name) + 2]; ima_audit_measurement() 303 hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]); ima_audit_measurement() 304 hash[i * 2] = '\0'; ima_audit_measurement() 313 audit_log_format(ab, " hash="); ima_audit_measurement() 314 snprintf(algo_hash, sizeof(algo_hash), "%s:%s", algo_name, hash); ima_audit_measurement()
|
H A D | ima_crypto.c | 13 * Calculates md5/sha1 file hash, template hash, boot-aggregate hash 26 #include <crypto/hash.h> 228 struct ima_digest_data *hash, ima_calc_file_hash_atfm() 239 hash->length = crypto_ahash_digestsize(tfm); ima_calc_file_hash_atfm() 329 ahash_request_set_crypt(req, NULL, hash->digest, 0); ima_calc_file_hash_atfm() 337 static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash) ima_calc_file_ahash() argument 342 tfm = ima_alloc_atfm(hash->algo); ima_calc_file_ahash() 346 rc = ima_calc_file_hash_atfm(file, hash, tfm); ima_calc_file_ahash() 354 struct ima_digest_data *hash, ima_calc_file_hash_tfm() 365 hash->length = crypto_shash_digestsize(tfm); ima_calc_file_hash_tfm() 406 rc = crypto_shash_final(shash, hash->digest); ima_calc_file_hash_tfm() 410 static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash) ima_calc_file_shash() argument 415 tfm = ima_alloc_tfm(hash->algo); ima_calc_file_shash() 419 rc = ima_calc_file_hash_tfm(file, hash, tfm); ima_calc_file_shash() 427 * ima_calc_file_hash - calculate file hash 429 * Asynchronous hash (ahash) allows using HW acceleration for calculating 430 * a hash. ahash performance varies for different data sizes on different 436 * shash for the hash calculation. If ahash fails, it falls back to using 439 int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) ima_calc_file_hash() argument 447 rc = ima_calc_file_ahash(file, hash); ima_calc_file_hash() 452 return ima_calc_file_shash(file, hash); ima_calc_file_hash() 456 * Calculate the hash of template data 461 struct ima_digest_data *hash, ima_calc_field_array_hash_tfm() 470 hash->length = crypto_shash_digestsize(tfm); ima_calc_field_array_hash_tfm() 498 rc = crypto_shash_final(shash, hash->digest); ima_calc_field_array_hash_tfm() 505 struct ima_digest_data *hash) ima_calc_field_array_hash() 510 tfm = ima_alloc_tfm(hash->algo); ima_calc_field_array_hash() 515 hash, tfm); ima_calc_field_array_hash() 532 * Calculate the boot aggregate hash 559 int __init ima_calc_boot_aggregate(struct ima_digest_data *hash) ima_calc_boot_aggregate() argument 564 tfm = ima_alloc_tfm(hash->algo); ima_calc_boot_aggregate() 568 hash->length = crypto_shash_digestsize(tfm); ima_calc_boot_aggregate() 569 rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm); ima_calc_boot_aggregate() 227 ima_calc_file_hash_atfm(struct file *file, struct ima_digest_data *hash, struct crypto_ahash *tfm) ima_calc_file_hash_atfm() argument 353 ima_calc_file_hash_tfm(struct file *file, struct ima_digest_data *hash, struct crypto_shash *tfm) ima_calc_file_hash_tfm() argument 458 ima_calc_field_array_hash_tfm(struct ima_field_data *field_data, struct ima_template_desc *td, int num_fields, struct ima_digest_data *hash, struct crypto_shash *tfm) ima_calc_field_array_hash_tfm() argument 503 ima_calc_field_array_hash(struct ima_field_data *field_data, struct ima_template_desc *desc, int num_fields, struct ima_digest_data *hash) ima_calc_field_array_hash() argument
|
H A D | ima_appraise.c | 134 struct ima_digest_data *hash) ima_get_hash_algo() 146 hash->algo = sig->hash_algo; ima_get_hash_algo() 149 hash->algo = xattr_value->digest[0]; ima_get_hash_algo() 156 hash->algo = HASH_ALGO_MD5; ima_get_hash_algo() 158 hash->algo = HASH_ALGO_SHA1; ima_get_hash_algo() 160 hash->algo = HASH_ALGO_MD5; ima_get_hash_algo() 181 * Assuming success, compare the xattr hash with the collected measurement. 204 cause = "missing-hash"; ima_appraise_measurement() 234 /* xattr length may be longer. md5 hash in previous ima_appraise_measurement() 243 cause = "invalid-hash"; ima_appraise_measurement() 288 * ima_update_xattr - update 'security.ima' hash value 295 /* do not collect and update hash for digital signatures */ ima_update_xattr() 133 ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len, struct ima_digest_data *hash) ima_get_hash_algo() argument
|
H A D | ima_init.c | 63 } hash; ima_add_boot_aggregate() local 66 memset(&hash, 0, sizeof(hash)); ima_add_boot_aggregate() 67 iint->ima_hash = &hash.hdr; ima_add_boot_aggregate() 72 result = ima_calc_boot_aggregate(&hash.hdr); ima_add_boot_aggregate()
|
H A D | ima_queue.c | 46 /* look up the digest value in the hash table, and return the entry */ ima_lookup_digest_entry() 67 * - Add template entry to measurement list and hash table. 92 static int ima_pcr_extend(const u8 *hash) ima_pcr_extend() argument 99 result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash); ima_pcr_extend() 105 /* Add template entry to the measurement list and hash table,
|
H A D | ima_template_lib.c | 166 * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest, ima_eventdigest_init_common() 167 * where <hash algo> is provided if the hash algorithm is not ima_eventdigest_init_common() 207 } hash; ima_eventdigest_init() local 213 memset(&hash, 0, sizeof(hash)); ima_eventdigest_init() 228 hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ? ima_eventdigest_init() 230 result = ima_calc_file_hash(file, &hash.hdr); ima_eventdigest_init() 237 cur_digest = hash.hdr.digest; ima_eventdigest_init() 238 cur_digestsize = hash.hdr.length; ima_eventdigest_init()
|
H A D | ima.h | 23 #include <linux/hash.h> 81 u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */ 88 struct hlist_node hnext; /* place in hash collision list */ 100 int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash); 103 struct ima_digest_data *hash); 104 int __init ima_calc_boot_aggregate(struct ima_digest_data *hash); 180 struct ima_digest_data *hash); 214 struct ima_digest_data *hash) ima_get_hash_algo() 212 ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len, struct ima_digest_data *hash) ima_get_hash_algo() argument
|
/linux-4.1.27/arch/powerpc/mm/ |
H A D | hugepage-hash64.c | 16 * PPC64 THP Support for hash based MMUs 30 unsigned long vpn, hash, shift, slot; __hash_page_thp() local 93 * hash page table entries. __hash_page_thp() 103 hash = hpt_hash(vpn, shift, ssize); __hash_page_thp() 106 hash = ~hash; __hash_page_thp() 107 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; __hash_page_thp() 129 hash = hpt_hash(vpn, shift, ssize); __hash_page_thp() 142 hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; __hash_page_thp() 144 /* Insert into the hash table, primary slot */ __hash_page_thp() 151 hpte_group = ((~hash & htab_hash_mask) * __hash_page_thp() 158 hpte_group = ((hash & htab_hash_mask) * __hash_page_thp() 183 * Mark the pte with _PAGE_COMBO, if we are trying to hash it with __hash_page_thp()
|
H A D | hugetlbpage-hash64.c | 2 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later) 17 extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn, 75 unsigned long hash, slot; __hash_page_huge() local 77 hash = hpt_hash(vpn, shift, ssize); __hash_page_huge() 79 hash = ~hash; __hash_page_huge() 80 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; __hash_page_huge() 89 unsigned long hash = hpt_hash(vpn, shift, ssize); __hash_page_huge() local 107 slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0, __hash_page_huge()
|
H A D | hash_low_64.S | 52 * Adds a 4K page to the hash table in a segment of 4K pages only 68 * r28 is a hash value 90 * is changing this PTE anyway and might hash it. 107 * Insert/Update the HPTE in the hash table. At this point, 120 * Calculate hash value for primary slot and store it in r28 125 xor r28,r5,r0 /* hash */ 134 * calculate hash value for primary slot and 142 xor r28,r28,r0 /* hash */ 194 /* Calculate primary group hash */ 196 rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ 219 /* Calculate secondary group hash */ 221 rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ 246 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 286 /* Secondary group ? if yes, get an inverted hash value */ 294 rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 362 * r28 is a hash value 388 * is changing this PTE anyway and might hash it. 405 * Insert/Update the HPTE in the hash table. At this point, 425 * Calculate hash value for primary slot and store it in r28 430 xor r28,r5,r0 /* hash */ 443 * Calculate hash value for primary slot and 451 xor r28,r28,r0 /* hash */ 500 * Check if the pte was already inserted into the hash table 523 /* Calculate primary group hash */ 525 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 552 /* Calculate secondary group hash */ 554 rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ 579 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 653 /* Secondary group ? if yes, get an inverted hash value */ 662 rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 725 * r28 is a hash value 747 * is changing this PTE anyway and might hash it. 769 * Insert/Update the HPTE in the hash table. At this point, 782 /* Calculate hash value for primary slot and store it in r28 787 xor r28,r5,r0 /* hash */ 795 * calculate hash value for primary slot and 803 xor r28,r28,r0 /* hash */ 858 /* Calculate primary group hash */ 860 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 883 /* Calculate secondary group hash */ 885 rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ 910 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 950 /* Secondary group ? if yes, get an inverted hash value */ 958 rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */
|
H A D | hash_low_32.S | 12 * the PowerPC MMU hash table. (PPC 8xx processors don't use a 13 * hash table, so this file is not used on them.) 39 * Load a PTE into the hash table, if possible. 47 * in the hash table and returns from the exception. 141 bl create_hpte /* add the hash table entry */ 168 * Add an entry for a particular page to the hash table. 174 * a hash table in use (i.e. we're not on a 603). 198 * we can't take a hash table miss (assuming the code is 275 * This routine adds a hardware PTE to the hash table. 292 * physical address of the hash table are known. These definitions 296 Hash_bits = 12 /* e.g. 256kB hash table */ 339 /* Get the address of the primary PTE group in the hash table (r3) */ 341 addis r0,r7,Hash_base@h /* base address of hash table */ 342 rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ 343 rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ 344 xor r3,r3,r0 /* make primary hash */ 369 ori r5,r5,PTE_H /* set H (secondary hash) bit */ 371 xoris r4,r3,Hash_msk>>16 /* compute secondary hash */ 396 ori r5,r5,PTE_H /* set H (secondary hash) bit */ 398 xoris r4,r3,Hash_msk>>16 /* compute secondary hash */ 419 * trying to hash in the kernel hash code itself after it has 420 * already taken the hash table lock. This works in conjunction 423 * If the hash table bucket is full of kernel text entries, we'll 450 * Between the tlbie above and updating the hash table entry below, 451 * another CPU could read the hash table entry and put it in its TLB. 461 * and gets the new PTE from the hash table. 492 * Flush the entry for a particular page from the hash table. 497 * We assume that there is a hash table in use (Hash != 0). 507 * we can't take a hash table miss (assuming the code is 579 /* Get the address of the primary PTE group in the hash table (r3) */ 581 addis r8,r7,Hash_base@h /* base address of hash table */ 582 rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ 583 rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ 584 xor r8,r0,r8 /* make primary hash */ 596 ori r11,r11,PTE_H /* set H (secondary hash) bit */ 599 xoris r12,r8,Hash_msk>>16 /* compute secondary hash */
|
H A D | ppc_mmu_32.c | 163 * Preload a translation in the hash table 178 * Initialize the hash table and patch the instructions in hashtable.S. 202 if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105); MMU_init_hw() 206 #define MIN_N_HPTEG 1024 /* min 64kB hash table */ MMU_init_hw() 224 * Find some memory for the hash table. MMU_init_hw() 226 if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); MMU_init_hw() 233 printk("Total memory = %lldMB; using %ldkB for hash table (at %p)\n", MMU_init_hw() 240 if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345); MMU_init_hw() 273 if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205); MMU_init_hw()
|
H A D | tlb_hash32.c | 3 * On machines where the MMU uses a hash table to store virtual to 5 * hash table also. 38 * Called when unmapping pages to flush entries from the TLB/hash table. 71 * it doesn't use a hash table. tlb_flush() 85 * since the hardware hash table functions as an extension of the
|
H A D | hash_utils_64.c | 199 unsigned long hash, hpteg; htab_bolt_mapping() local 231 hash = hpt_hash(vpn, shift, ssize); htab_bolt_mapping() 232 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); htab_bolt_mapping() 594 /* If hash size isn't already provided by the platform, we try to htab_get_table_size() 757 /* create the bolted linear mapping in the hash table */ for_each_memblock() 839 /* Initialize hash table for that CPU */ early_init_mmu_secondary() 989 * -1 - critical hash insertion error 1167 /* Dump some info in case of hash insertion failure, they should hash_page_mm() 1242 * hash preload there. Hence can ignore THP here hash_preload() 1274 /* Dump some info in case of hash insertion failure, they should hash_preload() 1292 unsigned long hash, index, shift, hidx, slot; flush_hash_page() local 1297 hash = hpt_hash(vpn, shift, ssize); pte_iterate_hashed_subpages() 1300 hash = ~hash; pte_iterate_hashed_subpages() 1301 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; pte_iterate_hashed_subpages() 1303 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); pte_iterate_hashed_subpages() 1336 unsigned long hidx, shift, vpn, hash, slot; flush_hash_hugepage() local 1372 hash = hpt_hash(vpn, shift, ssize); flush_hash_hugepage() 1374 hash = ~hash; flush_hash_hugepage() 1376 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; flush_hash_hugepage() 1417 * low_hash_fault is called when the low level hash code failed 1437 long hpte_insert_repeating(unsigned long hash, unsigned long vpn, hpte_insert_repeating() argument 1445 hpte_group = ((hash & htab_hash_mask) * hpte_insert_repeating() 1448 /* Insert into the hash table, primary slot */ hpte_insert_repeating() 1454 hpte_group = ((~hash & htab_hash_mask) * hpte_insert_repeating() 1461 hpte_group = ((hash & htab_hash_mask) * hpte_insert_repeating() 1475 unsigned long hash; kernel_map_linear_page() local 1481 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); kernel_map_linear_page() 1487 ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode, kernel_map_linear_page() 1500 unsigned long hash, hidx, slot; kernel_unmap_linear_page() local 1504 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); kernel_unmap_linear_page() 1511 hash = ~hash; kernel_unmap_linear_page() 1512 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; kernel_unmap_linear_page() 1547 * non-LPAR 64-bit hash MMU systems don't have a limitation setup_initial_memory_limit()
|
H A D | hash_native_64.c | 300 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less native_hpte_updatepp() 340 unsigned long hash; native_hpte_find() local 345 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); native_hpte_find() 349 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; native_hpte_find() 407 DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot); native_hpte_invalidate() 415 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less native_hpte_invalidate() 443 unsigned long hidx, vpn = 0, hash, slot; native_hugepage_invalidate() local 458 hash = hpt_hash(vpn, shift, ssize); native_hugepage_invalidate() 460 hash = ~hash; native_hugepage_invalidate() 462 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; native_hugepage_invalidate() 633 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing 639 unsigned long hash, index, hidx, shift, slot; native_flush_hash_range() local 657 hash = hpt_hash(vpn, shift, ssize); pte_iterate_hashed_subpages() 660 hash = ~hash; pte_iterate_hashed_subpages() 661 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; pte_iterate_hashed_subpages()
|
/linux-4.1.27/net/sched/ |
H A D | cls_tcindex.c | 23 #define PERFECT_HASH_THRESHOLD 64 /* use perfect hash if not bigger */ 41 struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */ 42 struct tcindex_filter __rcu **h; /* imperfect hash; */ 46 u32 hash; /* hash table size; 0 if undefined */ member in struct:tcindex_data 69 fp = &p->h[key % p->hash]; tcindex_lookup() 129 p->hash = DEFAULT_HASH_SIZE; tcindex_init() 151 for (i = 0; i < p->hash; i++) { tcindex_delete() 190 return p->hash > (p->mask >> p->shift); valid_perfect_hash() 236 * perfect hash and hash pointers from old data. tcindex_set_parms() 244 cp->hash = p->hash; tcindex_set_parms() 253 sizeof(*r) * cp->hash, GFP_KERNEL); tcindex_set_parms() 256 for (i = 0; i < cp->hash; i++) tcindex_set_parms() 269 cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); tcindex_set_parms() 280 * requirements for the allocated hash. tcindex_set_parms() 284 cp->hash > cp->alloc_hash) tcindex_set_parms() 286 } else if (cp->h && cp->hash != cp->alloc_hash) { tcindex_set_parms() 294 if (!cp->hash) { tcindex_set_parms() 295 /* Hash not specified, use perfect hash if the upper limit tcindex_set_parms() 299 cp->hash = (cp->mask >> cp->shift) + 1; tcindex_set_parms() 301 cp->hash = DEFAULT_HASH_SIZE; tcindex_set_parms() 305 cp->alloc_hash = cp->hash; tcindex_set_parms() 322 cp->perfect = kcalloc(cp->hash, sizeof(*r), GFP_KERNEL); tcindex_set_parms() 325 for (i = 0; i < cp->hash; i++) tcindex_set_parms() 331 struct tcindex_filter __rcu **hash; tcindex_set_parms() local 333 hash = kcalloc(cp->hash, tcindex_set_parms() 337 if (!hash) tcindex_set_parms() 340 cp->h = hash; tcindex_set_parms() 382 fp = cp->h + (handle % cp->hash); tcindex_set_parms() 440 for (i = 0; i < p->hash; i++) { tcindex_walk() 456 for (i = 0; i < p->hash; i++) { tcindex_walk() 508 if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) || tcindex_dump() 523 for (i = 0; !t->tcm_handle && i < p->hash; i++) { tcindex_dump()
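tcindex derives its key as (tc_index & mask) >> shift, and valid_perfect_hash() checks p->hash > (p->mask >> p->shift): when the table has more slots than the largest derivable key, every key can be directly indexed and no collision chains are needed. A sketch of that decision:

    #include <stdio.h>

    struct tcindex_cfg {
        unsigned short mask;
        int shift;
        unsigned int hash;  /* number of slots */
    };

    static unsigned int tcindex_key(unsigned short tc_index,
                                    const struct tcindex_cfg *p)
    {
        return (tc_index & p->mask) >> p->shift;
    }

    /* every possible key fits in the table => direct indexing, no chains */
    static int valid_perfect_hash(const struct tcindex_cfg *p)
    {
        return p->hash > (unsigned int)(p->mask >> p->shift);
    }

    int main(void)
    {
        struct tcindex_cfg p = { .mask = 0x3f, .shift = 2, .hash = 16 };

        /* max key is 0x3f >> 2 == 15, 16 slots => perfect hash applies */
        printf("key(0x2a)=%u perfect=%d\n", tcindex_key(0x2a, &p),
               valid_perfect_hash(&p));
        return 0;
    }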
|
H A D | sch_sfq.c | 57 When hash collisions occur, several flows are considered as one. 74 - number of hash buckets to 65536. 110 unsigned short hash; /* hash value (index in ht[]) */ member in struct:sfq_slot 120 unsigned int divisor; /* number of slots in hash table */ 177 unsigned int hash; sfq_hash() local 179 hash = jhash_3words((__force u32)keys->dst, sfq_hash() 182 return hash & (q->divisor - 1); sfq_hash() 344 q->ht[slot->hash] = SFQ_EMPTY_SLOT; sfq_drop() 372 unsigned int hash; sfq_enqueue() local 379 hash = sfq_classify(skb, sch, &ret); sfq_enqueue() 380 if (hash == 0) { sfq_enqueue() 386 hash--; sfq_enqueue() 388 x = q->ht[hash]; sfq_enqueue() 394 q->ht[hash] = x; sfq_enqueue() 396 slot->hash = hash; sfq_enqueue() 527 q->ht[slot->hash] = SFQ_EMPTY_SLOT; sfq_dequeue() 577 q->ht[slot->hash] = SFQ_EMPTY_SLOT; sfq_rehash() 582 unsigned int hash = sfq_hash(q, skb); sfq_rehash() local 583 sfq_index x = q->ht[hash]; sfq_rehash() 595 q->ht[hash] = x; sfq_rehash() 597 slot->hash = hash; sfq_rehash()
|
H A D | sch_sfb.c | 34 * This permits us to split one 32bit hash (provided per packet by rxhash or 48 /* We use a double buffering right before hash change 49 * (Section 4.4 of SFB reference : moving hash functions) 88 * We store in skb_cb the two hash values. 102 * If using 'internal' SFB flow classifier, hash comes from skb rxhash 103 * If using external classifier, hash comes from the classid. 132 u32 hash = sfbhash & SFB_BUCKET_MASK; increment_one_qlen() local 135 if (b[hash].qlen < 0xFFFF) increment_one_qlen() 136 b[hash].qlen++; increment_one_qlen() 161 u32 hash = sfbhash & SFB_BUCKET_MASK; decrement_one_qlen() local 164 if (b[hash].qlen > 0) decrement_one_qlen() 165 b[hash].qlen--; decrement_one_qlen() 333 u32 hash = sfbhash & SFB_BUCKET_MASK; sfb_enqueue() local 334 struct sfb_bucket *b = &q->bins[slot].bins[i][hash]; sfb_enqueue() 368 u32 hash = sfbhash & SFB_BUCKET_MASK; sfb_enqueue() local 369 struct sfb_bucket *b = &q->bins[slot].bins[i][hash]; sfb_enqueue()
|
H A D | sch_hhf.c | 33 * Conceptually, a multi-stage filter comprises k independent hash functions 34 * and k counter arrays. Packets are indexed into k counter arrays by k hash 39 * due to hash collision with other small flows; however, with high 77 * - T is a fixed-size hash-table with 1024 entries. Hash collision is 83 * Hence, instead of having four hash functions, we chop the 32-bit 84 * skb-hash into three 10-bit chunks, and the remaining 10-bit chunk is 114 u32 hash_id; /* hash of flow-id (e.g. TCP 5-tuple) */ 116 struct list_head flowchain; /* chaining under hash collision */ 129 u32 perturbation; /* hash perturbation */ 183 unsigned int hash; skb_hash() local 189 hash = jhash_3words((__force u32)keys.dst, skb_hash() 192 return hash; skb_hash() 196 static struct hh_flow_state *seek_list(const u32 hash, seek_list() argument 218 } else if (flow->hash_id == hash) { list_for_each_entry_safe() 266 u32 tmp_hash, hash; hhf_classify() local 283 hash = skb_hash(q, skb); hhf_classify() 286 flow_pos = hash & HHF_BIT_MASK; hhf_classify() 287 flow = seek_list(hash, &q->hh_flows[flow_pos], q); hhf_classify() 294 tmp_hash = hash; hhf_classify() 326 flow->hash_id = hash; hhf_classify()
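The comment block describes the multi-stage filter: the 32-bit skb hash is chopped into 10-bit chunks, each chunk indexes its own counter array in T, and a flow counts as a heavy hitter only once the minimum across its counters passes the admit threshold, so a collision in one array is masked by the others. A stand-alone count-min style sketch (array count, length and threshold are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define HHF_ARRAYS_CNT 3     /* three 10-bit chunks of the 32-bit hash */
    #define HHF_ARRAYS_LEN 1024  /* 2^10 counters per stage */

    static uint32_t filter[HHF_ARRAYS_CNT][HHF_ARRAYS_LEN];

    /* charge pkt_len to the flow; return 1 once the min counter passes admit */
    static int hhf_count(uint32_t hash, uint32_t pkt_len, uint32_t admit)
    {
        uint32_t min_hhf_val = ~0u;
        int i;

        for (i = 0; i < HHF_ARRAYS_CNT; i++) {
            uint32_t idx = (hash >> (10 * i)) & (HHF_ARRAYS_LEN - 1);

            filter[i][idx] += pkt_len;
            if (filter[i][idx] < min_hhf_val)
                min_hhf_val = filter[i][idx];
        }
        return min_hhf_val > admit;
    }

    int main(void)
    {
        uint32_t flow = 0xdeadbeef;
        int i, hh = 0;

        for (i = 0; i < 200 && !hh; i++)
            hh = hhf_count(flow, 1500, 128 * 1024);
        printf("heavy hitter after %d packets\n", i);
        return 0;
    }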
|
/linux-4.1.27/drivers/md/ |
H A D | dm-cache-policy-cleaner.c | 12 #include <linux/hash.h> 32 struct hash { struct 53 struct hash chash; 83 static int alloc_hash(struct hash *hash, unsigned elts) alloc_hash() argument 85 hash->nr_buckets = next_power(elts >> 4, 16); alloc_hash() 86 hash->hash_bits = ffs(hash->nr_buckets) - 1; alloc_hash() 87 hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets); alloc_hash() 89 return hash->table ? 0 : -ENOMEM; alloc_hash() 92 static void free_hash(struct hash *hash) free_hash() argument 94 vfree(hash->table); free_hash() 110 /* Cache entries hash. */ alloc_cache_blocks_with_hash() 142 struct hash *hash = &p->chash; lookup_cache_entry() local 143 unsigned h = hash_64(from_oblock(oblock), hash->hash_bits); lookup_cache_entry() 145 struct hlist_head *bucket = &hash->table[h]; lookup_cache_entry()
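alloc_hash() rounds the bucket count up to a power of two with next_power(elts >> 4, 16) and stores hash_bits so that lookup_cache_entry() can index with hash_64(), a multiplicative hash that keeps the top hash_bits bits of the product. A sketch of both pieces; the multiplier below is the common 2^64/golden-ratio constant, an assumption rather than the exact kernel value:

    #include <stdio.h>
    #include <stdint.h>

    /* smallest power of two >= n (and >= floor) */
    static unsigned next_power(unsigned n, unsigned floor)
    {
        unsigned p = floor;

        while (p < n)
            p <<= 1;
        return p;
    }

    /* Fibonacci hashing: multiply, then keep the top 'bits' bits */
    static unsigned hash_64(uint64_t val, unsigned bits)
    {
        return (unsigned)((val * 0x61C8864680B583EBull) >> (64 - bits));
    }

    int main(void)
    {
        unsigned cache_blocks = 10000;
        unsigned nr_buckets = next_power(cache_blocks >> 4, 16); /* 1024 */
        unsigned hash_bits = 0;

        while ((1u << hash_bits) < nr_buckets)
            hash_bits++;

        printf("%u buckets, block 12345 -> bucket %u\n",
               nr_buckets, hash_64(12345, hash_bits));
        return 0;
    }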
|
H A D | dm-verity.c | 12 * hash device. Setting this greatly improves performance when data and hash 22 #include <crypto/hash.h> 65 sector_t hash_start; /* hash start in blocks */ 67 sector_t hash_blocks; /* the number of hash blocks */ 69 unsigned char hash_dev_block_bits; /* log2(hash blocksize) */ 70 unsigned char hash_per_block_bits; /* log2(hashes in hash block) */ 73 unsigned digest_size; /* digest size for the current hash algorithm */ 75 int hash_failed; /* set to 1 if hash of any block failed */ 136 * hash_verified is nonzero, hash of the block has been verified. 142 * that multiple processes verify the hash of the same buffer simultaneously 169 * Return hash position of a specified block at a specified tree level 171 * The lowest "hash_per_block_bits"-bits of the result denote hash position 172 * inside a hash block. The remaining bits denote location of the hash block. 250 * Verify hash of a metadata block pertaining to the specified data block 253 * On successful return, io_want_digest(v, io) contains the hash value for 367 * First, we try to get the requested hash for verity_verify_io() 368 * the current block. If the hash block itself is verity_verify_io() 722 * <hash device> 724 * <hash block size> 726 * <hash start block> 801 ti->error = "Invalid hash device block size"; verity_ctr() 825 ti->error = "Invalid hash start"; verity_ctr() 840 ti->error = "Cannot initialize hash function"; verity_ctr()
|
/linux-4.1.27/kernel/bpf/ |
H A D | hashtab.c | 22 u32 n_buckets; /* number of hash buckets */ 30 u32 hash; member in struct:htab_elem 57 /* hash table size must be power of 2 */ htab_map_alloc() 103 static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) select_bucket() argument 105 return &htab->buckets[hash & (htab->n_buckets - 1)]; select_bucket() 108 static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, lookup_elem_raw() argument 114 if (l->hash == hash && !memcmp(&l->key, key, key_size)) lookup_elem_raw() 126 u32 hash, key_size; htab_map_lookup_elem() local 133 hash = htab_map_hash(key, key_size); htab_map_lookup_elem() 135 head = select_bucket(htab, hash); htab_map_lookup_elem() 137 l = lookup_elem_raw(head, hash, key, key_size); htab_map_lookup_elem() 151 u32 hash, key_size; htab_map_get_next_key() local 158 hash = htab_map_hash(key, key_size); htab_map_get_next_key() 160 head = select_bucket(htab, hash); htab_map_get_next_key() 163 l = lookup_elem_raw(head, hash, key, key_size); htab_map_get_next_key() 175 /* if next elem in this hash list is non-zero, just return it */ htab_map_get_next_key() 180 /* no more elements in this hash list, go to the next bucket */ htab_map_get_next_key() 181 i = hash & (htab->n_buckets - 1); htab_map_get_next_key() 230 l_new->hash = htab_map_hash(l_new->key, key_size); htab_map_update_elem() 235 head = select_bucket(htab, l_new->hash); htab_map_update_elem() 237 l_old = lookup_elem_raw(head, l_new->hash, key, key_size); htab_map_update_elem() 285 u32 hash, key_size; htab_map_delete_elem() local 292 hash = htab_map_hash(key, key_size); htab_map_delete_elem() 296 head = select_bucket(htab, hash); htab_map_delete_elem() 298 l = lookup_elem_raw(head, hash, key, key_size); htab_map_delete_elem()
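select_bucket() masks the hash with n_buckets - 1 (hence the power-of-two requirement) and lookup_elem_raw() compares the element's cached 32-bit hash before paying for a full memcmp() of the key. A compact user-space sketch of that layout, with FNV-1a standing in for the kernel's jhash:

    #include <string.h>
    #include <stdio.h>

    #define KEY_SIZE 16
    #define N_BUCKETS 64  /* power of two, as the excerpt requires */

    struct elem {
        struct elem *next;
        unsigned int hash;  /* cached, avoids most memcmp calls */
        char key[KEY_SIZE];
    };

    static struct elem *buckets[N_BUCKETS];

    static unsigned int map_hash(const char *key)
    {
        unsigned int i, h = 2166136261u; /* FNV-1a, stand-in for jhash */

        for (i = 0; i < KEY_SIZE; i++)
            h = (h ^ (unsigned char)key[i]) * 16777619u;
        return h;
    }

    static struct elem *lookup(const char *key)
    {
        unsigned int hash = map_hash(key);
        struct elem *l;

        for (l = buckets[hash & (N_BUCKETS - 1)]; l; l = l->next)
            if (l->hash == hash && !memcmp(l->key, key, KEY_SIZE))
                return l;
        return NULL;
    }

    int main(void)
    {
        struct elem e = { .next = NULL };

        memcpy(e.key, "example-key\0\0\0\0\0", KEY_SIZE);
        e.hash = map_hash(e.key);
        buckets[e.hash & (N_BUCKETS - 1)] = &e;
        printf("found: %s\n", lookup(e.key) ? "yes" : "no");
        return 0;
    }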
|
/linux-4.1.27/fs/ubifs/ |
H A D | key.h | 33 * 4KiB offset in case of inode node, and direntry hash in case of a direntry 34 * node. We use "r5" hash borrowed from reiserfs. 41 * key_mask_hash - mask a valid hash value. 44 * We use hash values as offset in directories, so values %0 and %1 are 48 static inline uint32_t key_mask_hash(uint32_t hash) key_mask_hash() argument 50 hash &= UBIFS_S_KEY_HASH_MASK; key_mask_hash() 51 if (unlikely(hash <= 2)) key_mask_hash() 52 hash += 3; key_mask_hash() 53 return hash; key_mask_hash() 57 * key_r5_hash - R5 hash function (borrowed from reiserfs). 77 * key_test_hash - testing hash function. 156 uint32_t hash = c->key_hash(nm->name, nm->len); dent_key_init() local 158 ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); dent_key_init() 160 key->u32[1] = hash | (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS); dent_key_init() 165 * hash function. 169 * @hash: direntry name hash 173 uint32_t hash) dent_key_init_hash() 175 ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); dent_key_init_hash() 177 key->u32[1] = hash | (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS); dent_key_init_hash() 191 uint32_t hash = c->key_hash(nm->name, nm->len); dent_key_init_flash() local 193 ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); dent_key_init_flash() 195 key->j32[1] = cpu_to_le32(hash | dent_key_init_flash() 224 uint32_t hash = c->key_hash(nm->name, nm->len); xent_key_init() local 226 ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); xent_key_init() 228 key->u32[1] = hash | (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS); xent_key_init() 242 uint32_t hash = c->key_hash(nm->name, nm->len); xent_key_init_flash() local 244 ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK)); xent_key_init_flash() 246 key->j32[1] = cpu_to_le32(hash | xent_key_init_flash() 370 * key_hash - get directory entry hash. 372 * @key: the key to get hash from 381 * key_hash_flash - get directory entry hash from an on-flash formatted key. 383 * @k: the key to get hash from 520 * is_hash_key - is a key vulnerable to hash collisions. 171 dent_key_init_hash(const struct ubifs_info *c, union ubifs_key *key, ino_t inum, uint32_t hash) dent_key_init_hash() argument
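key_mask_hash() exists because UBIFS stores the name hash in the key's offset field, and offset values 0 and 1 are reserved, so any masked hash at or below 2 is bumped past the reserved range. A sketch; the 29-bit mask is an assumption for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define S_KEY_HASH_MASK 0x1fffffff /* usable hash bits (assumed 29) */

    /* mask a name hash into the key's hash field, avoiding reserved offsets */
    static uint32_t key_mask_hash(uint32_t hash)
    {
        hash &= S_KEY_HASH_MASK;
        if (hash <= 2)      /* offsets 0 and 1 are reserved */
            hash += 3;
        return hash;
    }

    int main(void)
    {
        printf("%u %u\n", key_mask_hash(0), key_mask_hash(12345));
        return 0;
    }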
|
/linux-4.1.27/net/netfilter/ |
H A D | nf_conntrack_core.c | 78 /* return true if we need to recompute hashes (in case hash table was resized) */ nf_conntrack_double_lock() 133 /* The direction must be ignored, so we hash everything up to the hash_conntrack_raw() 143 static u32 __hash_bucket(u32 hash, unsigned int size) __hash_bucket() argument 145 return reciprocal_scale(hash, size); __hash_bucket() 148 static u32 hash_bucket(u32 hash, const struct net *net) hash_bucket() argument 150 return __hash_bucket(hash, net->ct.htable_size); hash_bucket() 331 unsigned int hash, reply_hash; nf_ct_delete_from_lists() local 340 hash = hash_conntrack(net, zone, nf_ct_delete_from_lists() 344 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); nf_ct_delete_from_lists() 347 nf_conntrack_double_unlock(hash, reply_hash); nf_ct_delete_from_lists() 410 const struct nf_conntrack_tuple *tuple, u32 hash) ____nf_conntrack_find() 414 unsigned int bucket = hash_bucket(hash, net); ____nf_conntrack_find() 421 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) { ____nf_conntrack_find() 446 const struct nf_conntrack_tuple *tuple, u32 hash) __nf_conntrack_find_get() 453 h = ____nf_conntrack_find(net, zone, tuple, hash); __nf_conntrack_find_get() 481 unsigned int hash, __nf_conntrack_hash_insert() 487 &net->ct.hash[hash]); __nf_conntrack_hash_insert() 489 &net->ct.hash[reply_hash]); __nf_conntrack_hash_insert() 496 unsigned int hash, reply_hash; nf_conntrack_hash_check_insert() local 507 hash = hash_conntrack(net, zone, nf_conntrack_hash_check_insert() 511 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); nf_conntrack_hash_check_insert() 514 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) nf_conntrack_hash_check_insert() 519 hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) nf_conntrack_hash_check_insert() 529 __nf_conntrack_hash_insert(ct, hash, reply_hash); nf_conntrack_hash_check_insert() 530 nf_conntrack_double_unlock(hash, reply_hash); nf_conntrack_hash_check_insert() 536 nf_conntrack_double_unlock(hash, reply_hash); nf_conntrack_hash_check_insert() 565 /* Confirm a connection given skb; places it in hash table */ 569 unsigned int hash, reply_hash; __nf_conntrack_confirm() local 595 /* reuse the hash saved before */ __nf_conntrack_confirm() 596 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; __nf_conntrack_confirm() 597 hash = hash_bucket(hash, net); __nf_conntrack_confirm() 601 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); __nf_conntrack_confirm() 603 /* We're not in hash table, and we refuse to set up related __nf_conntrack_confirm() 616 * user context, else we insert an already 'dead' hash, blocking __nf_conntrack_confirm() 626 not in the hash. If there is, we lost race. */ __nf_conntrack_confirm() 627 hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode) __nf_conntrack_confirm() 632 hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode) __nf_conntrack_confirm() 654 /* Since the lookup is lockless, hash insertion must be done after __nf_conntrack_confirm() 659 __nf_conntrack_hash_insert(ct, hash, reply_hash); __nf_conntrack_confirm() 660 nf_conntrack_double_unlock(hash, reply_hash); __nf_conntrack_confirm() 674 nf_conntrack_double_unlock(hash, reply_hash); __nf_conntrack_confirm() 692 unsigned int hash = hash_conntrack(net, zone, tuple); nf_conntrack_tuple_taken() local 698 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) { nf_conntrack_tuple_taken() 727 unsigned int hash, sequence; early_drop() local 733 hash = hash_bucket(_hash, net); early_drop() 735 lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS]; early_drop() 741 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], early_drop() 753 hash = (hash + 1) % net->ct.htable_size; early_drop() 794 gfp_t gfp, u32 hash) __nf_conntrack_alloc() 800 /* recompute the hash as nf_conntrack_hash_rnd is initialized */ __nf_conntrack_alloc() 801 hash = hash_conntrack_raw(orig, zone); __nf_conntrack_alloc() 809 if (!early_drop(net, hash)) { __nf_conntrack_alloc() 829 /* save hash for reusing when confirming */ __nf_conntrack_alloc() 830 *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash; __nf_conntrack_alloc() 897 unsigned int dataoff, u32 hash) init_conntrack() 914 hash); init_conntrack() 1011 u32 hash; resolve_normal_ct() local 1021 hash = hash_conntrack_raw(&tuple, zone); resolve_normal_ct() 1022 h = __nf_conntrack_find_get(net, zone, &tuple, hash); resolve_normal_ct() 1025 skb, dataoff, hash); resolve_normal_ct() 1187 /* Should be unconfirmed, so not in hash table yet */ nf_conntrack_alter_reply() 1217 /* If not in hash table, timer will not be active yet */ __nf_ct_refresh_acct() 1362 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { get_next_corpse() 1417 void nf_ct_free_hashtable(void *hash, unsigned int size) nf_ct_free_hashtable() argument 1419 if (is_vmalloc_addr(hash)) nf_ct_free_hashtable() 1420 vfree(hash); nf_ct_free_hashtable() 1422 free_pages((unsigned long)hash, nf_ct_free_hashtable() 1500 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); list_for_each_entry() 1516 struct hlist_nulls_head *hash; nf_ct_alloc_hashtable() local 1523 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, nf_ct_alloc_hashtable() 1525 if (!hash) { nf_ct_alloc_hashtable() 1527 hash = vzalloc(sz); nf_ct_alloc_hashtable() 1530 if (hash && nulls) nf_ct_alloc_hashtable() 1532 INIT_HLIST_NULLS_HEAD(&hash[i], i); nf_ct_alloc_hashtable() 1534 return hash; nf_ct_alloc_hashtable() 1542 struct hlist_nulls_head *hash, *old_hash; nf_conntrack_set_hashsize() local 1559 hash = nf_ct_alloc_hashtable(&hashsize, 1); nf_conntrack_set_hashsize() 1560 if (!hash) nf_conntrack_set_hashsize() 1567 /* Lookups in the old hash might happen in parallel, which means we nf_conntrack_set_hashsize() 1569 * created because of a false negative won't make it into the hash nf_conntrack_set_hashsize() 1574 while (!hlist_nulls_empty(&init_net.ct.hash[i])) { nf_conntrack_set_hashsize() 1575 h = hlist_nulls_entry(init_net.ct.hash[i].first, nf_conntrack_set_hashsize() 1581 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); nf_conntrack_set_hashsize() 1585 old_hash = init_net.ct.hash; nf_conntrack_set_hashsize() 1588 init_net.ct.hash = hash; nf_conntrack_set_hashsize() 1730 * We need to use special "null" values, not used in hash table 1776 net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1); 1777 if (!net->ct.hash) { 1812 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); 409 ____nf_conntrack_find(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple, u32 hash) ____nf_conntrack_find() argument 445 __nf_conntrack_find_get(struct net *net, u16 zone, const struct nf_conntrack_tuple *tuple, u32 hash) __nf_conntrack_find_get() argument 480 __nf_conntrack_hash_insert(struct nf_conn *ct, unsigned int hash, unsigned int reply_hash) __nf_conntrack_hash_insert() argument 791 __nf_conntrack_alloc(struct net *net, u16 zone, const struct nf_conntrack_tuple *orig, const struct nf_conntrack_tuple *repl, gfp_t gfp, u32 hash) __nf_conntrack_alloc() argument 892 init_conntrack(struct net *net, struct nf_conn *tmpl, const struct nf_conntrack_tuple *tuple, struct nf_conntrack_l3proto *l3proto, struct nf_conntrack_l4proto *l4proto, struct sk_buff *skb, unsigned int dataoff, u32 hash) init_conntrack() argument
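Conntrack insertion has to hold the bucket locks for both the original and reply directions, which nf_conntrack_double_lock() does without deadlocking by acquiring them in a fixed order, and taking only one lock when both tuples land in the same lock stripe. A user-space sketch of that ordering discipline, with pthread mutexes standing in for the kernel's spinlock array and none of the resize-recheck logic:

    #include <pthread.h>
    #include <stdio.h>

    #define NR_LOCKS 1024

    static pthread_mutex_t locks[NR_LOCKS];

    /* lock both buckets; lower index first to avoid ABBA deadlock,
     * and only one lock if both hash to the same stripe */
    static void double_lock(unsigned int h1, unsigned int h2)
    {
        h1 %= NR_LOCKS;
        h2 %= NR_LOCKS;
        if (h1 == h2) {
            pthread_mutex_lock(&locks[h1]);
        } else {
            if (h1 > h2) { unsigned int t = h1; h1 = h2; h2 = t; }
            pthread_mutex_lock(&locks[h1]);
            pthread_mutex_lock(&locks[h2]);
        }
    }

    static void double_unlock(unsigned int h1, unsigned int h2)
    {
        h1 %= NR_LOCKS;
        h2 %= NR_LOCKS;
        pthread_mutex_unlock(&locks[h1]);
        if (h1 != h2)
            pthread_mutex_unlock(&locks[h2]);
    }

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < NR_LOCKS; i++)
            pthread_mutex_init(&locks[i], NULL);
        double_lock(17, 981);
        /* ... insert into both hash chains here ... */
        double_unlock(17, 981);
        puts("ok");
        return 0;
    }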
|
H A D | xt_cluster.c | 45 u_int32_t hash = 0; xt_cluster_hash() local 49 hash = xt_cluster_hash_ipv4(nf_ct_orig_ipv4_src(ct), info); xt_cluster_hash() 52 hash = xt_cluster_hash_ipv6(nf_ct_orig_ipv6_src(ct), info); xt_cluster_hash() 59 return reciprocal_scale(hash, info->total_nodes); xt_cluster_hash() 96 unsigned long hash; xt_cluster_mt() local 128 hash = xt_cluster_hash(ct->master, info); xt_cluster_mt() 130 hash = xt_cluster_hash(ct, info); xt_cluster_mt() 132 return !!((1 << hash) & info->node_mask) ^ xt_cluster_mt() 175 MODULE_DESCRIPTION("Xtables: hash-based cluster match");
|
H A D | xt_HMARK.c | 30 MODULE_DESCRIPTION("Xtables: packet marking using hash calculation"); 114 /* This hash function is endian independent, to ensure consistent hashing if 119 u32 hash; hmark_hash() local 126 hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd); hmark_hash() 127 hash = hash ^ (t->proto & info->proto_mask); hmark_hash() 129 return reciprocal_scale(hash, info->hmodulus) + info->hoffset; hmark_hash() 317 pr_info("xt_HMARK: hash modulus can't be zero\n"); hmark_tg_check()
|
/linux-4.1.27/include/net/ |
H A D | udp.h | 55 * struct udp_hslot - UDP hash slot 70 * @hash: hash table, sockets are hashed on (local port) 71 * @hash2: hash table, sockets are hashed on (local port, local address) 72 * @mask: number of slots in hash tables, minus 1 73 * @log: log2(number of slots in hash table) 76 struct udp_hslot *hash; member in struct:udp_table 86 return &table->hash[udp_hashfn(net, num, table->mask)]; udp_hashslot() 89 * For secondary hash, net_hash_mix() is performed before calling 93 unsigned int hash) udp_hashslot2() 95 return &table->hash2[hash & table->mask]; udp_hashslot2() 179 /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ udp_lib_hash() 202 u32 hash; udp_flow_src_port() local 209 hash = skb_get_hash(skb); udp_flow_src_port() 210 if (unlikely(!hash)) { udp_flow_src_port() 212 /* Can't find a normal hash, caller has indicated an udp_flow_src_port() 213 * Ethernet packet so use that to compute a hash. udp_flow_src_port() 215 hash = jhash(skb->data, 2 * ETH_ALEN, udp_flow_src_port() 218 /* Can't derive any sort of hash for the packet, set udp_flow_src_port() 221 hash = udp_flow_hashrnd(); udp_flow_src_port() 225 /* Since this is being sent on the wire obfuscate hash a bit udp_flow_src_port() 230 hash ^= hash << 16; udp_flow_src_port() 232 return htons((((u64) hash * (max - min)) >> 32) + min); udp_flow_src_port() 92 udp_hashslot2(struct udp_table *table, unsigned int hash) udp_hashslot2() argument
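udp_flow_src_port() maps the flow hash into the configured port range without a modulo: ((u64)hash * (max - min)) >> 32 scales the hash uniformly into [0, max - min), and hash ^= hash << 16 obfuscates the raw hash before it appears on the wire. A sketch; the default ephemeral range and the host-order return value are assumptions:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t flow_src_port(uint32_t hash, int min, int max)
    {
        if (!min)
            min = 32768;    /* assumed lower end of the ephemeral range */
        if (!max)
            max = 61000;

        hash ^= hash << 16; /* don't expose the raw hash on the wire */
        return (uint16_t)((((uint64_t)hash * (max - min)) >> 32) + min);
    }

    int main(void)
    {
        printf("port=%u\n", flow_src_port(0x9e3779b9u, 0, 0));
        return 0;
    }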
|
H A D | bond_alb.h | 49 #define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table. 51 * because the key hash table is BYTE wide ! 97 * Note that this is not a proper hash table; if a new client's IP address 98 * hash collides with an existing client entry, the old entry is replaced. 101 * linking all the used entries of the hash table. This allows updating 104 * There are also linked lists of entries with identical hash(ip_src). These 116 /* list of used hash table entries, starting at rx_hashtbl_used_head */ 121 u32 src_next; /* next entry with same hash(ip_src) */ 122 u32 src_prev; /* prev entry with same hash(ip_src) */ 123 u32 src_first; /* first entry with hash(ip_src) == this entry's index */ 133 * hash table entries list. The entries in the list 149 struct rlb_client_info *rx_hashtbl; /* Receive hash table */
|
H A D | flow_keys.h | 6 * For IPv6 it contains 32bit hash of src address 8 * For IPv6 it contains 32bit hash of dst address
|
H A D | inet_frag.h | 38 * @list: hash bucket list 79 struct inet_frag_bucket hash[INETFRAGS_HASHSZ]; member in struct:inet_frags 89 * rnd_seqlock is used to let hash insertion detect 90 * when it needs to re-lookup the hash chain to use. 117 struct inet_frags *f, void *key, unsigned int hash);
|
/linux-4.1.27/drivers/crypto/caam/ |
H A D | compat.h | 14 #include <linux/hash.h> 39 #include <crypto/internal/hash.h>
|
/linux-4.1.27/arch/powerpc/boot/ |
H A D | zImage.lds.S | 36 .hash : { *(.hash) }
|
/linux-4.1.27/net/netfilter/ipvs/ |
H A D | ip_vs_sh.c | 16 * The sh algorithm is to select server by the hash key of source IP 26 * Note that servernode is a 256-bucket hash table that maps the hash 64 * for IPVS SH entry hash table 86 * Returns hash value for IPVS SH entry 111 unsigned int hash = ip_vs_sh_hashkey(svc->af, addr, port, 0); ip_vs_sh_get() local 112 struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest); ip_vs_sh_get() 121 * point (in fact, it is chosen to be the original hash value to make the 129 unsigned int hash, ihash; ip_vs_sh_get_fallback() local 148 hash = ip_vs_sh_hashkey(svc->af, addr, port, roffset); ip_vs_sh_get_fallback() 149 dest = rcu_dereference(s->buckets[hash].dest); ip_vs_sh_get_fallback() 164 * Assign all the hash buckets of the specified table with the service. 212 * Flush all the hash buckets of the specified table. 242 IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) allocated for " ip_vs_sh_init_svc() 246 /* assign the hash buckets with current dests */ ip_vs_sh_init_svc() 257 /* got to clean up hash buckets here */ ip_vs_sh_done_svc() 262 IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) released\n", ip_vs_sh_done_svc() 272 /* assign the hash buckets with the updated service */ ip_vs_sh_dest_changed()
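ip_vs_sh_get_fallback() first consults the bucket chosen by the plain source hash and, if that server is unusable, rehashes with an increasing offset so the probe walks other buckets deterministically. A sketch of the probe loop; the hash function, table contents and availability test are stand-ins:

    #include <stdio.h>
    #include <stdint.h>

    #define SH_TAB_SIZE 256

    static const char *buckets[SH_TAB_SIZE]; /* dest per bucket, NULL = none */

    static unsigned int sh_hashkey(uint32_t saddr, unsigned int offset)
    {
        /* stand-in for the kernel's addr+port hash */
        return ((saddr ^ (saddr >> 16)) + offset * 2654435761u)
               & (SH_TAB_SIZE - 1);
    }

    static const char *sh_get_fallback(uint32_t saddr)
    {
        unsigned int roffset, ihash = sh_hashkey(saddr, 0);
        const char *dest = buckets[ihash];

        if (dest)
            return dest; /* first choice is the plain hash */

        /* probe the remaining buckets with an increasing offset */
        for (roffset = 1; roffset < SH_TAB_SIZE; roffset++) {
            dest = buckets[sh_hashkey(saddr, roffset)];
            if (dest)
                return dest;
        }
        return NULL;
    }

    int main(void)
    {
        buckets[sh_hashkey(0x0a000001, 3)] = "server-B";
        printf("%s\n", sh_get_fallback(0x0a000001));
        return 0;
    }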
|
H A D | ip_vs_proto.c | 51 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); register_ip_vs_protocol() local 53 pp->next = ip_vs_proto_table[hash]; register_ip_vs_protocol() 54 ip_vs_proto_table[hash] = pp; register_ip_vs_protocol() 69 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); register_ip_vs_proto_netns() local 77 pd->next = ipvs->proto_data_table[hash]; register_ip_vs_proto_netns() 78 ipvs->proto_data_table[hash] = pd; register_ip_vs_proto_netns() 85 ipvs->proto_data_table[hash] = pd->next; register_ip_vs_proto_netns() 100 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol); unregister_ip_vs_protocol() local 102 pp_p = &ip_vs_proto_table[hash]; unregister_ip_vs_protocol() 123 unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol); unregister_ip_vs_proto_netns() local 125 pd_p = &ipvs->proto_data_table[hash]; unregister_ip_vs_proto_netns() 145 unsigned int hash = IP_VS_PROTO_HASH(proto); ip_vs_proto_get() local 147 for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) { ip_vs_proto_get() 163 unsigned int hash = IP_VS_PROTO_HASH(proto); __ipvs_proto_data_get() local 165 for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) { __ipvs_proto_data_get()
|
H A D | ip_vs_dh.c | 19 * The dh algorithm is to select server by the hash key of destination IP 29 * Note that servernode is a 256-bucket hash table that maps the hash 58 * for IPVS DH entry hash table 73 * Returns hash value for IPVS DH entry 99 * Assign all the hash buckets of the specified table with the service. 136 * Flush all the hash buckets of the specified table. 166 IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) allocated for " ip_vs_dh_init_svc() 170 /* assign the hash buckets with current dests */ ip_vs_dh_init_svc() 181 /* got to clean up hash buckets here */ ip_vs_dh_done_svc() 186 IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) released\n", ip_vs_dh_done_svc() 196 /* assign the hash buckets with the updated service */ ip_vs_dh_dest_changed()
|
H A D | ip_vs_conn.c | 50 * Connection hash size. Default is what was selected at compile time. 54 MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size"); 61 * Connection hash table: for input and output packets lookups of IPVS 71 /* random value for IPVS connection hash */ 75 * Fine locking granularity for big connection hash table 109 * Returns hash value for IPVS connection entry 169 unsigned int hash; ip_vs_conn_hash() local 176 hash = ip_vs_conn_hashkey_conn(cp); ip_vs_conn_hash() 178 ct_write_lock_bh(hash); ip_vs_conn_hash() 184 hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]); ip_vs_conn_hash() 193 ct_write_unlock_bh(hash); ip_vs_conn_hash() 205 unsigned int hash; ip_vs_conn_unhash() local 209 hash = ip_vs_conn_hashkey_conn(cp); ip_vs_conn_unhash() 211 ct_write_lock_bh(hash); ip_vs_conn_unhash() 223 ct_write_unlock_bh(hash); ip_vs_conn_unhash() 233 unsigned int hash; ip_vs_conn_unlink() local 236 hash = ip_vs_conn_hashkey_conn(cp); ip_vs_conn_unlink() 238 ct_write_lock_bh(hash); ip_vs_conn_unlink() 253 ct_write_unlock_bh(hash); ip_vs_conn_unlink() 268 unsigned int hash; __ip_vs_conn_in_get() local 271 hash = ip_vs_conn_hashkey_param(p, false); __ip_vs_conn_in_get() 275 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { __ip_vs_conn_in_get() 353 unsigned int hash; ip_vs_ct_in_get() local 356 hash = ip_vs_conn_hashkey_param(p, false); ip_vs_ct_in_get() 360 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { ip_vs_ct_in_get() 405 unsigned int hash; ip_vs_conn_out_get() local 411 hash = ip_vs_conn_hashkey_param(p, true); ip_vs_conn_out_get() 415 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { ip_vs_conn_out_get() 481 /* hash on new dport */ ip_vs_conn_fill_cport() 870 * Create a new connection entry and hash it into the ip_vs_conn_tab 1028 /* more on same hash chain? */ ip_vs_conn_seq_next() 1253 unsigned int hash = prandom_u32() & ip_vs_conn_tab_mask; ip_vs_random_dropentry() local 1255 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) { ip_vs_random_dropentry() 1376 * Allocate the connection hash table and initialize its list heads ip_vs_conn_init() 1391 pr_info("Connection hash table configured " ip_vs_conn_init() 1405 /* calculate the random value for connection hash */ ip_vs_conn_init()
|
/linux-4.1.27/scripts/mod/ |
H A D | sumversion.c | 41 uint32_t hash[MD4_HASH_WORDS]; member in struct:md4_ctx 88 static void md4_transform(uint32_t *hash, uint32_t const *in) md4_transform() argument 92 a = hash[0]; md4_transform() 93 b = hash[1]; md4_transform() 94 c = hash[2]; md4_transform() 95 d = hash[3]; md4_transform() 148 hash[0] += a; md4_transform() 149 hash[1] += b; md4_transform() 150 hash[2] += c; md4_transform() 151 hash[3] += d; md4_transform() 157 md4_transform(ctx->hash, ctx->block); md4_transform_helper() 162 mctx->hash[0] = 0x67452301; md4_init() 163 mctx->hash[1] = 0xefcdab89; md4_init() 164 mctx->hash[2] = 0x98badcfe; md4_init() 165 mctx->hash[3] = 0x10325476; md4_init() 218 md4_transform(mctx->hash, mctx->block); md4_final_ascii() 219 cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(uint32_t)); md4_final_ascii() 222 mctx->hash[0], mctx->hash[1], mctx->hash[2], mctx->hash[3]); md4_final_ascii()
|
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/linux/ |
H A D | linux-crypto.c | 35 * Array of hash algorithm speed in MByte per second 51 CWARN("Unsupported hash algorithm id = %d, max id is %d\n", cfs_crypto_hash_alloc() 61 CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n", cfs_crypto_hash_alloc() 87 CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n", cfs_crypto_hash_alloc() 98 unsigned char *hash, unsigned int *hash_len) cfs_crypto_hash_digest() 112 if (hash == NULL || *hash_len < type->cht_size) { cfs_crypto_hash_digest() 120 err = crypto_hash_digest(&hdesc, &sl, sl.length, hash); cfs_crypto_hash_digest() 176 unsigned char *hash, unsigned int *hash_len) cfs_crypto_hash_final() 186 if (hash == NULL || *hash_len < size) { cfs_crypto_hash_final() 190 err = crypto_hash_final((struct hash_desc *) hdesc, hash); cfs_crypto_hash_final() 209 unsigned char hash[64]; cfs_crypto_performance_test() local 215 hash, &hash_len); cfs_crypto_performance_test() 224 CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n", cfs_crypto_performance_test() 232 CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n", cfs_crypto_performance_test() 246 * Do performance test for all hash algorithms. 253 /* Data block size for testing hash. Maximum cfs_crypto_test_hashes() 95 cfs_crypto_hash_digest(unsigned char alg_id, const void *buf, unsigned int buf_len, unsigned char *key, unsigned int key_len, unsigned char *hash, unsigned int *hash_len) cfs_crypto_hash_digest() argument 175 cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, unsigned char *hash, unsigned int *hash_len) cfs_crypto_hash_final() argument
|
H A D | linux-crypto-adler.c | 34 #include <crypto/internal/hash.h> 54 static int adler32_setkey(struct crypto_shash *hash, const u8 *key, adler32_setkey() argument 57 u32 *mctx = crypto_shash_ctx(hash); adler32_setkey() 60 crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); adler32_setkey()
|
/linux-4.1.27/arch/x86/um/vdso/ |
H A D | vdso-layout.lds.S | 11 .hash : { *(.hash) } :text 12 .gnu.hash : { *(.gnu.hash) }
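The .hash and .gnu.hash output sections collected here are the symbol lookup tables the dynamic linker consults when resolving names in the vDSO. For reference, a SysV .hash bucket index comes from the standard ELF hash function defined by the gABI (this is the well-known algorithm, not code from this linker script):

    /* standard SysV ELF hash; bucket = elf_hash(name) % nbucket */
    unsigned long elf_hash(const unsigned char *name)
    {
        unsigned long h = 0, g;

        while (*name) {
            h = (h << 4) + *name++;
            g = h & 0xf0000000;
            if (g)
                h ^= g >> 24;
            h &= ~g;
        }
        return h;
    }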
|
/linux-4.1.27/include/uapi/linux/ |
H A D | reiserfs_xattr.h | 15 __le32 h_hash; /* hash of the value */
|
/linux-4.1.27/arch/arm/vdso/ |
H A D | vdso.lds.S | 37 .hash : { *(.hash) } :text 38 .gnu.hash : { *(.gnu.hash) }
|
/linux-4.1.27/include/linux/ceph/ |
H A D | ceph_hash.h | 4 #define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */
|
/linux-4.1.27/drivers/net/team/ |
H A D | team_mode_loadbalance.c | 27 #define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */ 82 #define LB_HTPM_PORT_BY_HASH(lp_priv, hash) \ 83 (lb_priv)->ex->tx_hash_to_port_mapping[hash].port 85 #define LB_HTPM_OPT_INST_INFO_BY_HASH(lp_priv, hash) \ 86 (lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info 109 /* Basic tx selection based solely by hash */ lb_hash_select_tx_port() 113 unsigned char hash) lb_hash_select_tx_port() 115 int port_index = team_num_to_port_index(team, hash); lb_hash_select_tx_port() 124 unsigned char hash) lb_htpm_select_tx_port() 126 return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash)); lb_htpm_select_tx_port() 136 .name = "hash", 191 unsigned char hash) lb_update_tx_stats() 199 hash_stats = &pcpu_stats->hash_stats[hash]; lb_update_tx_stats() 211 unsigned char hash; lb_transmit() local 214 hash = lb_get_skb_hash(lb_priv, skb); lb_transmit() 216 port = select_tx_port_func(team, lb_priv, skb, hash); lb_transmit() 221 lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash); lb_transmit() 338 unsigned char hash = info->array_index; lb_tx_hash_to_port_mapping_init() local 340 LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info; lb_tx_hash_to_port_mapping_init() 349 unsigned char hash = ctx->info->array_index; lb_tx_hash_to_port_mapping_get() local 351 port = LB_HTPM_PORT_BY_HASH(lb_priv, hash); lb_tx_hash_to_port_mapping_get() 361 unsigned char hash = ctx->info->array_index; lb_tx_hash_to_port_mapping_set() local 366 rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash), lb_tx_hash_to_port_mapping_set() 378 unsigned char hash = info->array_index; lb_hash_stats_init() local 380 lb_priv->ex->stats.info[hash].opt_inst_info = info; lb_hash_stats_init() 387 unsigned char hash = ctx->info->array_index; lb_hash_stats_get() local 389 ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats; lb_hash_stats_get() 580 func = lb_select_tx_port_get_func("hash"); lb_init() 110 lb_hash_select_tx_port(struct team *team, struct lb_priv *lb_priv, struct sk_buff *skb, unsigned char hash) lb_hash_select_tx_port() argument 121 lb_htpm_select_tx_port(struct team *team, struct lb_priv *lb_priv, struct sk_buff *skb, unsigned char hash) lb_htpm_select_tx_port() argument 189 lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv, struct lb_port_priv *lb_port_priv, unsigned char hash) lb_update_tx_stats() argument
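Both selectors above turn the one-byte skb hash into an egress port: the basic "hash" mode maps it onto the number of enabled ports, while the mapping mode indexes the 256-entry hash-to-port table. The basic mode's reduction, sketched (team_num_to_port_index is effectively a modulo; the helper name here is hypothetical):

    /* 256 possible hash values ("hash is a char"), N enabled ports */
    static int hash_to_port_index(unsigned char hash, int num_enabled_ports)
    {
        if (num_enabled_ports <= 0)
            return -1;                     /* nothing to transmit on */
        return hash % num_enabled_ports;
    }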
|
/linux-4.1.27/net/ipv6/ |
H A D | output_core.c | 15 u32 hash, id; __ipv6_select_ident() local 17 hash = __ipv6_addr_jhash(dst, hashrnd); __ipv6_select_ident() 18 hash = __ipv6_addr_jhash(src, hash); __ipv6_select_ident() 19 hash ^= net_hash_mix(net); __ipv6_select_ident() 25 id = ip_idents_reserve(hash, 1); __ipv6_select_ident() 35 * This is similar to ipv6_select_ident() but we use an independent hash
|
H A D | inet6_hashtables.c | 46 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so 65 unsigned int hash = inet6_ehashfn(net, daddr, hnum, saddr, sport); __inet6_lookup_established() local 66 unsigned int slot = hash & hashinfo->ehash_mask; __inet6_lookup_established() 73 if (sk->sk_hash != hash) __inet6_lookup_established() 131 unsigned int hash = inet_lhashfn(net, hnum); inet6_lookup_listener() local 132 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; inet6_lookup_listener() 161 if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE) inet6_lookup_listener() 203 const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr, __inet6_check_established() local 205 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); __inet6_check_established() 206 spinlock_t *lock = inet_ehash_lockp(hinfo, hash); __inet6_check_established() 215 if (sk2->sk_hash != hash) __inet6_check_established() 229 * in hash table socket with a funny identity. __inet6_check_established() 233 sk->sk_hash = hash; __inet6_check_established() 248 /* Silly. Should hash-dance instead... */ __inet6_check_established()
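A detail worth noting in __inet6_lookup_established above: each socket caches its full 32-bit hash in sk->sk_hash, so the chain walk can reject non-matching entries with a single integer compare before the expensive address/port comparison. The same trick in a generic sketch:

    #include <stddef.h>

    struct entry {
        struct entry *next;
        unsigned int hash;                 /* full hash, cached at insert time */
        /* ... full key fields ... */
    };

    static struct entry *chain_lookup(struct entry **tab, unsigned int mask,
                                      unsigned int hash, const void *key,
                                      int (*match)(const struct entry *, const void *))
    {
        struct entry *e;

        for (e = tab[hash & mask]; e; e = e->next) {
            if (e->hash != hash)           /* cheap reject first */
                continue;
            if (match(e, key))
                return e;
        }
        return NULL;
    }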
|
/linux-4.1.27/Documentation/dvb/ |
H A D | get_dvb_firmware | 65 my $hash = "53970ec17a538945a6d8cb608a7b3899"; 73 verify("$tmpdir/software/OEM/HE/App/boot/SC_MAIN.MC", $hash); 83 my $hash = "237938d53a7f834c05c42b894ca68ac3"; 93 verify("$tmpdir/ZEnglish/sc_main.mc", $hash); 102 my $hash = "2105fd5bf37842fbcdfa4bfd58f3594a"; 111 verify("$tmpdir/fwtmp", $hash); 120 my $hash = "6a7e1e2f2644b162ff0502367553c72d"; 129 verify("$tmpdir/fwtmp", $hash); 138 my $hash = "1ea24dee4eea8fe971686981f34fd2e0"; 147 verify("$tmpdir/fwtmp", $hash); 156 my $hash = "603431b6259715a8e88f376a53b64e2f"; 162 verify($sourcefile, $hash); 171 my $hash = "bd86f458cee4a8f0a8ce2d20c66215a9"; 179 verify("$tmpdir/software/OEM/STB/App/Boot/STB_PC_T.bin", $hash); 188 my $hash = "53e58f4f5b5c2930beee74a7681fed92"; 196 verify("$tmpdir/software/OEM/STB/App/Boot/STB_PC_X.bin", $hash); 205 my $hash = "b013ececea83f4d6d8d2a29ac7c1b448"; 213 verify("$tmpdir/software/OEM/STB/App/Boot/STB_PC_S.bin", $hash); 261 my $hash = "e88c9372d1f66609a3e7b072c53fbcfe"; 286 verify("$tmpdir/fwtmp3", $hash); 295 my $hash = "5609fd295168aea88b25ff43a6f79c36"; 300 verify($fwfile, $hash); 308 my $hash = "fa490295a527360ca16dcdf3224ca243"; 313 verify($outfile,$hash); 321 my $hash = "476befae8c7c1bb9648954060b1eec1f"; 329 verify("$tmpdir/SkyNET.sys", $hash); 338 my $hash = "111cb885b1e009188346d72acfed024c"; 346 verify("$tmpdir/3xHybrid.sys", $hash); 355 my $hash = "d830949c771a289505bf9eafc225d491"; 360 verify($fwfile, $hash); 368 my $hash = "7d3bb956dc9df0eafded2b56ba57cc42"; 373 verify($fwfile, $hash); 498 my $hash = "7702e8938612de46ccadfe9b413cb3b5"; 503 verify($fwfile, $hash); 511 my $hash = "c16208e02f36fc439a557ad4c613364a"; 516 verify($fwfile, $hash); 524 my $hash = "658397cb9eba9101af9031302671f49d"; 529 verify($outfile,$hash); 537 my $hash = "e3f08935158038d385ad382442f4bb2d"; 548 verify("$tmpdir/Driver/Files/AF15BDA.sys", $hash); 603 my $hash = "fc6017ad01e79890a97ec53bea157ed2"; 609 verify($sourcefile, $hash); 617 my $hash = "b0155a8083fb822a3bd47bc360e74601"; 623 verify($sourcefile, $hash); 631 my $hash = "7572ae0eb9cdf91baabd7c0ba9e09b31"; 637 verify($sourcefile, $hash); 646 my $hash = "f5a37b9a20a3534997997c0b1382a3e5"; 654 verify($zipfile, $hash); 664 my $hash = "83ab82e7e9480ec8bf1ae0155ca63c88"; 672 verify($zipfile, $hash); 681 my $hash = "19000dada8e2741162ccc50cc91fa7f1"; 687 verify($fwfile, $hash); 695 my $hash = "6722a2442a05423b781721fbc069ed5e"; 703 verify($zipfile, $hash); 735 my $hash = "4403de903bf2593464c8d74bbc200a57"; 742 verify($sourcefile, $hash); 752 my $hash = "4403de903bf2593464c8d74bbc200a57"; 759 verify($sourcefile, $hash); 791 my $hash = "76633e7c76b0edee47c3ba18ded99336"; 798 verify($sourcefile, $hash); 864 my ($filename, $hash) = @_; 872 die "Hash of extracted file does not match!\n" if ($testhash ne $hash);
|
/linux-4.1.27/drivers/staging/lustre/lnet/lnet/ |
H A D | peer.c | 47 struct list_head *hash; lnet_peer_tables_create() local 61 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i, cfs_percpt_for_each() 62 LNET_PEER_HASH_SIZE * sizeof(*hash)); cfs_percpt_for_each() 63 if (hash == NULL) { cfs_percpt_for_each() 64 CERROR("Failed to create peer hash table\n"); cfs_percpt_for_each() 70 INIT_LIST_HEAD(&hash[j]); cfs_percpt_for_each() 71 ptable->pt_hash = hash; /* sign of initialization */ cfs_percpt_for_each() 81 struct list_head *hash; lnet_peer_tables_destroy() local 89 hash = ptable->pt_hash; cfs_percpt_for_each() 90 if (hash == NULL) /* not initialized */ cfs_percpt_for_each() 97 LASSERT(list_empty(&hash[j])); cfs_percpt_for_each() 99 LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash)); cfs_percpt_for_each() 126 /* lose hash table's ref */ cfs_percpt_for_each() 268 lp->lp_refcount = 2; /* 1 for caller; 1 for hash */ lnet_nid2peer_locked()
|
/linux-4.1.27/net/netlabel/ |
H A D | netlabel_domainhash.c | 4 * This file manages the domain hash table that NetLabel uses to determine 52 /* Domain hash table */ 53 /* updates should be so rare that having one spinlock for the entire hash table 66 * netlbl_domhsh_free_entry - Frees a domain hash table entry 71 * function so that the memory allocated to a hash table entry can be released 105 * netlbl_domhsh_hash - Hashing function for the domain hash table 106 * @domain: the domain name to hash 109 * This is the hashing function for the domain hash table, it returns the 111 * ensuring that the hash table is protected with either an RCU read lock or the 112 * hash table lock. 134 * Searches the domain hash table and returns a pointer to the hash table 136 * ensuring that the hash table is protected with either an RCU read lock or the 137 * hash table lock. 163 * Searches the domain hash table and returns a pointer to the hash table 165 * hash table then the default entry is returned if valid, otherwise NULL is 166 * returned. The caller is responsible for ensuring that the hash table is 167 * protected with either an RCU read lock or the hash table lock. 316 * netlbl_domhsh_init - Init for the domain hash 317 * @size: the number of bits to use for the hash buckets 320 * Initializes the domain hash table, should be called only by 355 * netlbl_domhsh_add - Adds an entry to the domain hash table 360 * Adds a new entry to the domain hash table and handles any updates to the 476 * netlbl_domhsh_add_default - Adds the default entry to the domain hash table 481 * Adds a new default entry to the domain hash table and handles any updates 498 * Removes an entry from the domain hash table and handles any updates to the 626 * netlbl_domhsh_remove - Removes an entry from the domain hash table 631 * Removes an entry from the domain hash table and handles any updates to the 657 * Removes/resets the default entry for the domain hash table and handles any 668 * netlbl_domhsh_getentry - Get an entry from the domain hash table 672 * Look through the domain hash table searching for an entry to match @domain, 683 * netlbl_domhsh_getentry_af4 - Get an entry from the domain hash table 688 * Look through the domain hash table searching for an entry to match @domain 713 * netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table 718 * Look through the domain hash table searching for an entry to match @domain 743 * netlbl_domhsh_walk - Iterate through the domain mapping hash table 750 * Iterate over the domain mapping hash table, skipping the first @skip_bkt
|
H A D | netlabel_domainhash.h | 4 * This file manages the domain hash table that NetLabel uses to determine 40 /* Domain hash table size */ 83 /* Manipulate the domain hash table */
|
H A D | netlabel_unlabeled.c | 66 /* The unlabeled connection hash table which we use to map network interfaces 68 * LSM. The hash table is used to look up the network interface entry 71 * match cannot be found in the hash table then the default entry 113 /* Unlabeled connection hash table */ 115 * hash table should be okay */ 155 * netlbl_unlhsh_free_iface - Frees an interface entry from the hash table 160 * function so that memory allocated to a hash table interface entry can be 196 * netlbl_unlhsh_hash - Hashing function for the hash table 197 * @ifindex: the network interface/device to hash 200 * This is the hashing function for the unlabeled hash table, it returns the 202 * ensuring that the hash table is protected with either an RCU read lock or 203 * the hash table lock. 216 * Searches the unlabeled connection hash table and returns a pointer to the 218 * caller is responsible for ensuring that the hash table is protected with 219 * either an RCU read lock or the hash table lock. 238 * netlbl_unlhsh_add_addr4 - Add a new IPv4 address entry to the hash table 245 * Add a new address entry into the unlabeled connection hash table using the 278 * netlbl_unlhsh_add_addr6 - Add a new IPv6 address entry to the hash table 285 * Add a new address entry into the unlabeled connection hash table using the 322 * netlbl_unlhsh_add_iface - Adds a new interface entry to the hash table 326 * Add a new, empty, interface entry into the unlabeled connection hash table. 369 * netlbl_unlhsh_add - Adds a new entry to the unlabeled connection hash table 379 * Adds a new entry to the unlabeled connection hash table. Returns zero on 481 * Remove an IP address entry from the unlabeled connection hash table. 543 * Remove an IP address entry from the unlabeled connection hash table. 600 * Remove an interface entry from the unlabeled connection hash table if it is 634 * netlbl_unlhsh_remove - Remove an entry from the unlabeled hash table 643 * Removes an existing entry from the unlabeled connection hash table. 715 * related entries from the unlabeled connection hash table. 891 * connection entry to the hash table. Returns zero on success, negative 1177 * connection hash table in a form suitable for use in a kernel generated 1408 * netlbl_unlabel_init - Initialize the unlabeled connection hash table 1409 * @size: the number of bits to use for the hash buckets 1412 * Initializes the unlabeled connection hash table and registers a network
|
/linux-4.1.27/drivers/gpu/drm/ttm/ |
H A D | ttm_object.c | 50 * ref_hash hash tables. 80 * @object_lock: lock that protects the object_hash hash table. 82 * @object_hash: hash table for fast lookup of object global names. 102 * @hash: Hash entry for the per-file object reference hash. 112 * This is similar to an idr object, but it also has a hash table entry 122 struct drm_hash_item hash; member in struct:ttm_ref_object 176 &base->hash, ttm_base_object_init() 191 (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash); ttm_base_object_init() 205 (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash); ttm_release_base() 233 struct drm_hash_item *hash; ttm_base_object_lookup() local 238 ret = drm_ht_find_item_rcu(ht, key, &hash); ttm_base_object_lookup() 241 base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj; ttm_base_object_lookup() 255 struct drm_hash_item *hash; ttm_base_object_lookup_for_ref() local 260 ret = drm_ht_find_item_rcu(ht, key, &hash); ttm_base_object_lookup_for_ref() 263 base = drm_hash_entry(hash, struct ttm_base_object, hash); ttm_base_object_lookup_for_ref() 287 struct drm_hash_item *hash; ttm_ref_object_exists() local 291 if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0)) ttm_ref_object_exists() 299 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); ttm_ref_object_exists() 325 struct drm_hash_item *hash; ttm_ref_object_add() local 337 ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash); ttm_ref_object_add() 340 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); ttm_ref_object_add() 358 ref->hash.key = base->hash.key; ttm_ref_object_add() 365 ret = drm_ht_insert_item_rcu(ht, &ref->hash); ttm_ref_object_add() 397 (void)drm_ht_remove_item_rcu(ht, &ref->hash); ttm_ref_object_release() 415 struct drm_hash_item *hash; ttm_ref_object_base_unref() local 419 ret = drm_ht_find_item(ht, key, &hash); ttm_ref_object_base_unref() 424 ref = drm_hash_entry(hash, struct ttm_ref_object, hash); ttm_ref_object_base_unref() 637 *handle = base->hash.key; ttm_prime_fd_to_handle()
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
H A D | remote_perm.c | 82 struct hlist_head *hash; alloc_rmtperm_hash() local 85 OBD_SLAB_ALLOC_GFP(hash, ll_rmtperm_hash_cachep, alloc_rmtperm_hash() 86 REMOTE_PERM_HASHSIZE * sizeof(*hash), alloc_rmtperm_hash() 88 if (!hash) alloc_rmtperm_hash() 92 INIT_HLIST_HEAD(hash + i); alloc_rmtperm_hash() 94 return hash; alloc_rmtperm_hash() 97 void free_rmtperm_hash(struct hlist_head *hash) free_rmtperm_hash() argument 103 if (!hash) free_rmtperm_hash() 107 hlist_for_each_entry_safe(lrp, next, hash + i, free_rmtperm_hash() 110 OBD_SLAB_FREE(hash, ll_rmtperm_hash_cachep, free_rmtperm_hash() 111 REMOTE_PERM_HASHSIZE * sizeof(*hash)); free_rmtperm_hash() 314 struct hlist_head *hash = lli->lli_remote_perms; 319 LASSERT(hash); 324 hlist_for_each_entry_safe(lrp, node, next, hash + i,
|
H A D | dir.c | 75 * hash, and so readdir should be done in hash order. 77 * New readdir implementation does readdir in hash order, and uses hash of a 80 * . hash is not unique, so it cannot be used to index cached directory 81 * pages on the client (note, that it requires a whole pageful of hash 84 * . hash is not unique, so it cannot, strictly speaking, be used as an 86 * mimics their solution: seekdir(hash) positions directory at the first 87 * entry with the given hash. 93 * Client caches directory pages using hash of the first entry as an index. As 94 * noted above hash is not unique, so this solution doesn't work as is: 95 * special processing is needed for "page hash chains" (i.e., sequences of 96 * pages filled with entries all having the same hash value). 99 * client the hash of the first entry on the page next to one returned. When 100 * client detects that this hash is the same as hash of the first entry on the 101 * returned page, page hash collision has to be handled. Pages in the 102 * hash chain, except first one, are termed "overflow pages". 105 * pages. Instead, when page hash collision is detected, all overflow pages 109 * invocation finishes, overflow pages are discarded. If page hash collision 111 * page hash collision, again read overflow pages in, process next portion of 113 * because, given reasonable hash, page hash collisions are extremely rare. 117 * When seekdir(hash) is called, original 133 * a header lu_dirpage which describes the start/end hash, and whether this 134 * page is empty (contains no dir entry) or hash collide with next page. 151 __u64 hash = *((__u64 *)_hash); ll_dir_filler() local 161 CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash %llu\n", ll_dir_filler() 162 inode->i_ino, inode->i_generation, inode, hash); ll_dir_filler() 183 op_data->op_offset = hash; ll_dir_filler() 221 hash = le64_to_cpu(dp->ldp_hash_start); ll_dir_filler() 224 offset = hash_x_index(hash, hash64); ll_dir_filler() 265 * Find, kmap and return page that contains given hash. 267 static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash, ll_dir_page_locate() argument 273 * Complement of hash is used as an index so that ll_dir_page_locate() 275 * hash _smaller_ than one we are looking for. 
ll_dir_page_locate() 277 unsigned long offset = hash_x_index(*hash, hash64); ll_dir_page_locate() 303 *hash = *hash >> 32; ll_dir_page_locate() 308 LASSERTF(*start <= *hash, "start = %#llx,end = %#llx,hash = %#llx\n", ll_dir_page_locate() 309 *start, *end, *hash); ll_dir_page_locate() 310 CDEBUG(D_VFSTRACE, "page %lu [%llu %llu], hash %llu\n", ll_dir_page_locate() 311 offset, *start, *end, *hash); ll_dir_page_locate() 312 if (*hash > *end) { ll_dir_page_locate() 315 } else if (*end != *start && *hash == *end) { ll_dir_page_locate() 317 * upon hash collision, remove this page, ll_dir_page_locate() 338 struct page *ll_get_dir_page(struct inode *dir, __u64 hash, ll_get_dir_page() argument 350 __u64 lhash = hash; ll_get_dir_page() 383 PFID(ll_inode2fid(dir)), hash, rc); ll_get_dir_page() 408 * suppose hash chain of entries with hash value HASH crosses ll_get_dir_page() 412 * happens and finds P1, as it starts with matching hash ll_get_dir_page() 423 page = read_cache_page(mapping, hash_x_index(hash, hash64), ll_get_dir_page() 427 PFID(ll_inode2fid(dir)), hash, PTR_ERR(page)); ll_get_dir_page() 435 PFID(ll_inode2fid(dir)), hash, -5); ll_get_dir_page() 442 PFID(ll_inode2fid(dir)), hash, -5); ll_get_dir_page() 450 lhash = hash >> 32; ll_get_dir_page() 454 lhash = hash; ll_get_dir_page() 458 CWARN("Page-wide hash collision: %llu\n", end); ll_get_dir_page() 460 CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n", ll_get_dir_page() 462 le64_to_cpu(dp->ldp_hash_end), hash); ll_get_dir_page() 506 __u64 hash = MDS_DIR_END_OFF; ll_dir_read() local 522 hash = le64_to_cpu(ent->lde_hash); ll_dir_read() 523 if (hash < pos) ll_dir_read() 525 * Skip until we find target hash ll_dir_read() 538 lhash = hash >> 32; ll_dir_read() 540 lhash = hash; ll_dir_read() 582 pos = hash; ll_dir_read()
|
/linux-4.1.27/arch/x86/boot/compressed/ |
H A D | aslr.c | 37 static unsigned long rotate_xor(unsigned long hash, const void *area, rotate_xor() argument 43 for (i = 0; i < size / sizeof(hash); i++) { rotate_xor() 45 hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); rotate_xor() 46 hash ^= ptr[i]; rotate_xor() 49 return hash; rotate_xor() 55 unsigned long hash = 0; get_random_boot() local 57 hash = rotate_xor(hash, build_str, sizeof(build_str)); get_random_boot() 58 hash = rotate_xor(hash, real_mode, sizeof(*real_mode)); get_random_boot() 60 return hash; get_random_boot()
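rotate_xor above is the whole mixer: rotate the accumulator right by 7 bits (written as a left rotate by width minus 7) and XOR in the next word, so the build string and boot parameters all perturb the seed. A standalone restatement that compiles as-is (the build string is a stand-in):

    #include <stdio.h>

    static unsigned long rotate_xor(unsigned long hash, const void *area,
                                    unsigned long size)
    {
        unsigned long i;
        const unsigned long *ptr = area;

        for (i = 0; i < size / sizeof(hash); i++) {
            /* rotate right by 7, then fold in the next word */
            hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
            hash ^= ptr[i];
        }
        return hash;
    }

    int main(void)
    {
        const char build[] = "example build string, padded...";

        printf("%lx\n", rotate_xor(0, build, sizeof(build)));
        return 0;
    }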
|
/linux-4.1.27/drivers/gpu/drm/vmwgfx/ |
H A D | vmwgfx_cmdbuf_res.c | 42 * @hash: Hash entry for the manager hash table. 50 struct drm_hash_item hash; member in struct:vmw_cmdbuf_res 89 struct drm_hash_item *hash; vmw_cmdbuf_res_lookup() local 93 ret = drm_ht_find_item(&man->resources, key, &hash); vmw_cmdbuf_res_lookup() 98 (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res); vmw_cmdbuf_res_lookup() 114 WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash)); vmw_cmdbuf_res_free() 175 &entry->hash); list_for_each_entry_safe() 197 * resource to the hash table of the manager identified by @man. The 213 cres->hash.key = user_key | (res_type << 24); vmw_cmdbuf_res_add() 214 ret = drm_ht_insert_item(&man->resources, &cres->hash); vmw_cmdbuf_res_add() 236 * hash table and, if it exists, removes it. Depending on its current staging 246 struct drm_hash_item *hash; vmw_cmdbuf_res_remove() local 250 &hash); vmw_cmdbuf_res_remove() 254 entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash); vmw_cmdbuf_res_remove() 261 (void) drm_ht_remove_item(&man->resources, &entry->hash); vmw_cmdbuf_res_remove()
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/core/ |
H A D | ramht.c | 30 u32 hash = 0; nvkm_ramht_hash() local 33 hash ^= (handle & ((1 << ramht->bits) - 1)); nvkm_ramht_hash() 37 hash ^= chid << (ramht->bits - 4); nvkm_ramht_hash() 38 hash = hash << 3; nvkm_ramht_hash() 39 return hash; nvkm_ramht_hash()
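nvkm_ramht_hash folds the 32-bit handle down to a ramht->bits-wide index by XORing successive low-bit slices, so every bit of the handle influences the bucket even though the table is small; the channel id is then mixed in and the result scaled to an entry offset. The fold on its own (a sketch of just the folding loop, without the chid mixing shown above):

    #include <stdint.h>

    /* XOR-fold a 32-bit handle down to a 'bits'-wide hash */
    static uint32_t ramht_fold(uint32_t handle, unsigned int bits)
    {
        uint32_t hash = 0;

        while (handle) {
            hash ^= handle & ((1u << bits) - 1);
            handle >>= bits;
        }
        return hash;
    }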
|
/linux-4.1.27/net/ipv4/ |
H A D | tcp_metrics.c | 9 #include <linux/hash.h> 27 struct net *net, unsigned int hash); 158 unsigned int hash) tcpm_new() 170 tm = __tcp_get_metrics(saddr, daddr, net, hash); tcpm_new() 183 oldest = deref_locked(tcp_metrics_hash[hash].chain); tcpm_new() 202 tm->tcpm_next = tcp_metrics_hash[hash].chain; tcpm_new() 203 rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm); tcpm_new() 222 struct net *net, unsigned int hash) __tcp_get_metrics() 227 for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; __tcp_get_metrics() 243 unsigned int hash; __tcp_get_metrics_req() local 252 hash = (__force unsigned int) daddr.addr.a4; __tcp_get_metrics_req() 258 hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr); __tcp_get_metrics_req() 266 hash ^= net_hash_mix(net); __tcp_get_metrics_req() 267 hash = hash_32(hash, tcp_metrics_hash_log); __tcp_get_metrics_req() 269 for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; __tcp_get_metrics_req() 284 unsigned int hash; __tcp_get_metrics_tw() local 292 hash = (__force unsigned int) daddr.addr.a4; __tcp_get_metrics_tw() 301 hash = (__force unsigned int) daddr.addr.a4; __tcp_get_metrics_tw() 307 hash = ipv6_addr_hash(&tw->tw_v6_daddr); __tcp_get_metrics_tw() 315 hash ^= net_hash_mix(net); __tcp_get_metrics_tw() 316 hash = hash_32(hash, tcp_metrics_hash_log); __tcp_get_metrics_tw() 318 for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; __tcp_get_metrics_tw() 334 unsigned int hash; tcp_get_metrics() local 342 hash = (__force unsigned int) daddr.addr.a4; tcp_get_metrics() 351 hash = (__force unsigned int) daddr.addr.a4; tcp_get_metrics() 357 hash = ipv6_addr_hash(&sk->sk_v6_daddr); tcp_get_metrics() 365 hash ^= net_hash_mix(net); tcp_get_metrics() 366 hash = hash_32(hash, tcp_metrics_hash_log); tcp_get_metrics() 368 tm = __tcp_get_metrics(&saddr, &daddr, net, hash); tcp_get_metrics() 372 tm = tcpm_new(dst, &saddr, &daddr, hash); tcp_get_metrics() 953 unsigned int *hash, int optional, int v4, int v6) __parse_nl_addr() 961 if (hash) __parse_nl_addr() 962 *hash = (__force unsigned int) addr->addr.a4; __parse_nl_addr() 971 if (hash) __parse_nl_addr() 972 *hash = ipv6_addr_hash(&addr->addr.in6); __parse_nl_addr() 979 unsigned int *hash, int optional) parse_nl_addr() 981 return __parse_nl_addr(info, addr, hash, optional, parse_nl_addr() 997 unsigned int hash; tcp_metrics_nl_cmd_get() local 1004 ret = parse_nl_addr(info, &daddr, &hash, 0); tcp_metrics_nl_cmd_get() 1021 hash ^= net_hash_mix(net); tcp_metrics_nl_cmd_get() 1022 hash = hash_32(hash, tcp_metrics_hash_log); tcp_metrics_nl_cmd_get() 1025 for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; tcp_metrics_nl_cmd_get() 1078 unsigned int hash; tcp_metrics_nl_cmd_del() local 1083 ret = parse_nl_addr(info, &daddr, &hash, 1); tcp_metrics_nl_cmd_del() 1094 hash ^= net_hash_mix(net); tcp_metrics_nl_cmd_del() 1095 hash = hash_32(hash, tcp_metrics_hash_log); tcp_metrics_nl_cmd_del() 1096 hb = tcp_metrics_hash + hash; tcp_metrics_nl_cmd_del() 1192 panic("Could not allocate the tcp_metrics hash table\n"); tcp_metrics_init() 155 tcpm_new(struct dst_entry *dst, struct inetpeer_addr *saddr, struct inetpeer_addr *daddr, unsigned int hash) tcpm_new() argument 220 __tcp_get_metrics(const struct inetpeer_addr *saddr, const struct inetpeer_addr *daddr, struct net *net, unsigned int hash) __tcp_get_metrics() argument 952 __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr, unsigned int *hash, int optional, int v4, int v6) __parse_nl_addr() argument 978 parse_nl_addr(struct genl_info 
*info, struct inetpeer_addr *addr, unsigned int *hash, int optional) parse_nl_addr() argument
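Every lookup above ends the same way: a 32-bit hash of the peer address is perturbed per-namespace with net_hash_mix() and reduced to the table's log2 size with hash_32(), which in kernels of this vintage is multiplicative (Fibonacci) hashing: multiply by a large odd constant and keep the top bits. A hedged sketch of that reduction (treat the constant as illustrative):

    #include <stdint.h>

    /* multiplicative hashing: keep the top 'bits' bits of hash * K */
    static inline uint32_t hash32(uint32_t hash, unsigned int bits)
    {
        return (hash * 0x9e370001u) >> (32 - bits); /* K ~ 2^32 / golden ratio */
    }

    /* usage shape, as in the lookups above (names illustrative):
     *   slot = hash32(addr_hash ^ ns_mix, tcp_metrics_hash_log);
     */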
|
H A D | inet_fragment.c | 83 hb = &f->hash[i]; inet_frag_secret_rebuild() 94 /* Relink to new hash chain. */ inet_frag_secret_rebuild() 95 hb_dest = &f->hash[hval]; inet_frag_secret_rebuild() 180 evicted += inet_evict_bucket(f, &f->hash[i]); inet_frag_worker() 207 struct inet_frag_bucket *hb = &f->hash[i]; inet_frags_init() 249 inet_evict_bucket(f, &f->hash[i]); inet_frags_exit_net() 265 unsigned int seq, hash; variable 270 hash = inet_frag_hashfn(f, fq); 271 hb = &f->hash[hash]; 350 /* With SMP race we have to recheck hash table, because inet_frag_intern() 352 * we acquired hash bucket lock. inet_frag_intern() 417 unsigned int hash) inet_frag_find() 426 hash &= (INETFRAGS_HASHSZ - 1); inet_frag_find() 427 hb = &f->hash[hash]; inet_frag_find() 456 static const char msg[] = "inet_frag_find: Fragment hash bucket" inet_frag_maybe_warn_overflow() 415 inet_frag_find(struct netns_frags *nf, struct inet_frags *f, void *key, unsigned int hash) inet_frag_find() argument
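inet_frag_secret_rebuild above is the standard defense against hash-flooding: periodically pick a new hash secret and relink every entry into the bucket its key hashes to under that secret. Its skeleton, reduced to a sketch (types hypothetical; the kernel version also juggles per-bucket chain locks):

    #define FRAG_HASHSZ 64

    struct frag { struct frag *next; unsigned int key; };

    static unsigned int frag_hashfn(unsigned int key, unsigned int secret)
    {
        return (key ^ secret) & (FRAG_HASHSZ - 1);
    }

    static void secret_rebuild(struct frag **tab, unsigned int new_secret)
    {
        struct frag *moved = 0, *f, *next;
        unsigned int i;

        for (i = 0; i < FRAG_HASHSZ; i++) {    /* unhook every chain */
            for (f = tab[i]; f; f = next) {
                next = f->next;
                f->next = moved;
                moved = f;
            }
            tab[i] = 0;
        }
        for (f = moved; f; f = next) {         /* relink under the new secret */
            unsigned int hval = frag_hashfn(f->key, new_secret);

            next = f->next;
            f->next = tab[hval];
            tab[hval] = f;
        }
    }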
|
H A D | inet_hashtables.c | 58 * The bindhash mutex for snum's hash chain must be held here. 213 unsigned int hash = inet_lhashfn(net, hnum); __inet_lookup_listener() local 214 struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; __inet_lookup_listener() 245 if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE) __inet_lookup_listener() 295 unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport); __inet_lookup_established() local 296 unsigned int slot = hash & hashinfo->ehash_mask; __inet_lookup_established() 302 if (sk->sk_hash != hash) __inet_lookup_established() 344 unsigned int hash = inet_ehashfn(net, daddr, lport, __inet_check_established() local 346 struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); __inet_check_established() 347 spinlock_t *lock = inet_ehash_lockp(hinfo, hash); __inet_check_established() 356 if (sk2->sk_hash != hash) __inet_check_established() 371 * in hash table socket with a funny identity. __inet_check_established() 375 sk->sk_hash = hash; __inet_check_established() 390 /* Silly. Should hash-dance instead... */ __inet_check_established() 588 /* No definite answer... Walk to established hash table */ __inet_hash_connect() 597 * Bind a port for a connect operation and hash it.
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
H A D | mthca_mcg.c | 54 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 58 * previous entry in hash chain and *mgm holds AMGM entry. 61 * entry in hash chain and *mgm holds end of hash chain. 65 u16 *hash, int *prev, int *index) find_mgm() 79 err = mthca_MGID_HASH(dev, mailbox, hash); find_mgm() 86 mthca_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash); find_mgm() 88 *index = *hash; find_mgm() 99 if (*index != *hash) { find_mgm() 125 u16 hash; mthca_multicast_attach() local 138 err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); mthca_multicast_attach() 219 u16 hash; mthca_multicast_detach() local 231 err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); mthca_multicast_detach() 63 find_mgm(struct mthca_dev *dev, u8 *gid, struct mthca_mailbox *mgm_mailbox, u16 *hash, int *prev, int *index) find_mgm() argument
|
/linux-4.1.27/drivers/base/power/ |
H A D | trace.c | 60 * a case where we have a hash collision, and we end up not 130 * This is just the sdbm hash function with a user-supplied 150 * section instead. Generating a hash of the data gives us a 153 * likely not give totally bogus reports - if the hash matches, 180 unsigned int hash = hash_string(lineno, file, FILEHASH); show_file_hash() local 181 if (hash != value) show_file_hash() 183 pr_info(" hash matches %s:%u\n", file, lineno); show_file_hash() 198 unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH); show_dev_hash() local 199 if (hash == value) { show_dev_hash() 200 dev_info(dev, "hash matches\n"); show_dev_hash() 218 * It's possible that multiple devices will match the hash and we can't show_trace_dev_match() 225 unsigned int hash = hash_string(DEVSEED, dev_name(dev), show_trace_dev_match() local 227 if (hash == value) { show_trace_dev_match()
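The comment at line 130 names the hasher: sdbm with a user-supplied seed, so only a small hash of the file/device name has to survive in nonvolatile storage, and on resume the kernel reports which known strings hash to the saved value. The sdbm recurrence is hash = hash * 65599 + c, conventionally written with shifts; a sketch with the seed threaded through (signature illustrative):

    /* sdbm string hash: h = h * 65599 + c, i.e. (h << 6) + (h << 16) - h + c */
    static unsigned int sdbm_hash(unsigned int seed, const char *data,
                                  unsigned int mod)
    {
        unsigned int hash = seed;

        while (*data)
            hash = (hash << 6) + (hash << 16) - hash + (unsigned char)*data++;
        return hash % mod;                 /* FILEHASH / DEVHASH sized result */
    }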
|
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/ |
H A D | hash.c | 36 * libcfs/libcfs/hash.c 38 * Implement a hash class for hashing in the lustre system. 44 * - Added per-hash feature flags: 47 * - Added per-hash statistics 62 * - support lockless hash, caller will take care of locks: 63 * avoid lock overhead for hash tables that are already protected 69 * bucket is more reasonable for those frequently changed hash tables 72 * one lock to protect all hash operations to avoid overhead of 73 * multiple locks if hash table is always small 75 * - removed a lot of unnecessary addref & decref on hash element: 84 * - safer rehash on large hash table 86 * hash table and finish rehash in one batch, it's dangerous on SMP 90 * hash table even while it's rehashing. 93 * . hash table has refcount on element 94 * . hash table doesn't change refcount on adding/removing element 96 * - support long name hash table (for param-tree) 100 * hash-table because @key is overwritten without any protection. 102 * hash tables, cfs_hash_rehash_key will overwrite hash-key 105 * - better hash iteration: 106 * Now we support both locked iteration & lockless iteration of hash 116 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high."); 161 /** No lock hash */ 239 * Simple hash head without depth tracking 277 * Simple hash head with depth tracking 321 * double links hash head without depth tracking 375 * double links hash head with depth tracking 957 * Initialize new libcfs hash, where: 958 * @name - Descriptive hash name 959 * @cur_bits - Initial hash table size, in bits 960 * @max_bits - Maximum allowed hash table resize, in bits 961 * @ops - Registered hash table operations 962 * @flags - CFS_HASH_REHASH enable dynamic hash resizing 963 * - CFS_HASH_SORT enable chained hash sort 1091 * Cleanup libcfs hash @hs. 1128 "hash %s bucket %u(%u) is not empty: %u items left\n", hlist_for_each_safe() 1200 * - user wants non-blocking change (add/del) on hash table 1211 * Add item @hnode to libcfs hash @hs using @key. The registered 1265 * Add item @hnode to libcfs hash @hs using @key. The registered 1278 * Add item @hnode to libcfs hash @hs using @key. If this @key 1279 * already exists in the hash then ops->hs_get will be called on the 1294 * Delete item @hnode from the libcfs hash @hs using @key. The @key 1295 * is required to ensure the correct hash bucket is locked since there 1297 * removed from the hash will be returned and ops->hs_put is called 1310 /* NB: do nothing if @hnode is not in hash table */ cfs_hash_del() 1335 * Delete item given @key in libcfs hash @hs. The first @key found in 1336 * the hash will be removed, if the key exists multiple times in the hash 1348 * Lookup an item using @key in the libcfs hash @hs and return it. 1349 * If the @key is found in the hash, hs->hs_get() is called and the 1353 * in the hash @hs NULL is returned. 1422 * For each item in the libcfs hash @hs call the passed callback @func 1423 * and pass to it as an argument each hash item and the private @data. 1500 * Delete item from the libcfs hash @hs when @func returns true. 1560 * Iterate the hash table and call @func on each item without 1566 * b. user can remove non-zero-ref item from hash-table, 1567 * so the item can be removed from hash-table, even worse, 1569 * hash bucket. 1656 * For each hash bucket in the libcfs hash @hs call the passed callback 1657 * @func until all the hash buckets are empty. The passed callback @func 1659 * from the hash. You may either use the cfs_hash_del() or hlist_del() 1662 * hash is empty. 
Note it is still possible to concurrently add new 1663 * items into the hash. It is the caller's responsibility to ensure 1681 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n", cfs_hash_for_each_empty() 1719 * For each item in the libcfs hash @hs which matches the @key call 1720 * the passed callback @func and pass to it as an argument each hash 1754 * Rehash the libcfs hash @hs to the given @bits. This can be used 1755 * to grow the hash size when excessive chaining is detected, or to 1756 * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH 1757 * flag is set in @hs the libcfs hash may be dynamically rehashed 1758 * during addition or removal if the hash's theta value exceeds 1760 * these values are tuned to keep the chained hash depth small, and 1785 "hash %s is still rehashing, rescheded %d\n", cfs_hash_rehash_cancel_locked() 1850 * Delete from old hash bucket; move to new bucket. hlist_for_each_safe() 1919 /* someone wants to destroy the hash, abort now */ cfs_hash_rehash_worker() 1964 * Rehash the object referenced by @hnode in the libcfs hash @hs. The 1966 * in the hash, and the @new_key will be used to reinsert the object. 1969 * object is missing from the hash. When an object is being rehashed 2066 * The distribution is a summary of the chained hash depth in cfs_hash_debug_str() 2067 * each of the libcfs hash buckets. Each bucket's hsb_count is cfs_hash_debug_str() 2068 * divided by the hash theta value and used to generate a cfs_hash_debug_str() 2069 * histogram of the hash distribution. A uniform hash will cfs_hash_debug_str() 2070 * result in all hash buckets being close to the average thus cfs_hash_debug_str() 2072 * If your hash function results in a non-uniform hash, this will cfs_hash_debug_str() 2075 * Uniform hash distribution: 128/128/0/0/0/0/0/0 cfs_hash_debug_str() 2076 * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1 cfs_hash_debug_str()
|
H A D | Makefile | 14 libcfs_string.o hash.o kernel_user_comm.o \
|
/linux-4.1.27/arch/sparc/crypto/ |
H A D | md5_glue.c | 16 #include <crypto/internal/hash.h> 36 mctx->hash[0] = cpu_to_le32(0x67452301); md5_sparc64_init() 37 mctx->hash[1] = cpu_to_le32(0xefcdab89); md5_sparc64_init() 38 mctx->hash[2] = cpu_to_le32(0x98badcfe); md5_sparc64_init() 39 mctx->hash[3] = cpu_to_le32(0x10325476); md5_sparc64_init() 54 md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1); __md5_sparc64_update() 59 md5_sparc64_transform(sctx->hash, data + done, rounds); __md5_sparc64_update() 108 dst[i] = sctx->hash[i]; md5_sparc64_final()
|
H A D | crc32c_glue.c | 18 #include <crypto/internal/hash.h> 30 static int crc32c_sparc64_setkey(struct crypto_shash *hash, const u8 *key, crc32c_sparc64_setkey() argument 33 u32 *mctx = crypto_shash_ctx(hash); crc32c_sparc64_setkey() 36 crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN); crc32c_sparc64_setkey()
|
/linux-4.1.27/fs/btrfs/ |
H A D | hash.c | 14 #include <crypto/hash.h> 16 #include "hash.h"
|
/linux-4.1.27/fs/coda/ |
H A D | cnode.c | 64 unsigned long hash = coda_f2i(fid); coda_iget() local 66 inode = iget5_locked(sb, hash, coda_test_inode, coda_set_inode, fid); coda_iget() 74 inode->i_ino = hash; coda_iget() 121 unsigned long hash = coda_f2i(newfid); coda_replace_fid() local 129 inode->i_ino = hash; coda_replace_fid() 130 __insert_inode_hash(inode, hash); coda_replace_fid() 137 unsigned long hash = coda_f2i(fid); coda_fid_to_inode() local 144 inode = ilookup5(sb, hash, coda_test_inode, fid); coda_fid_to_inode()
|
/linux-4.1.27/arch/x86/purgatory/ |
H A D | sha256.h | 20 extern int sha256_final(struct sha256_state *sctx, u8 *hash);
|
/linux-4.1.27/arch/x86/vdso/ |
H A D | vdso-layout.lds.S | 42 .hash : { *(.hash) } :text 43 .gnu.hash : { *(.gnu.hash) }
|
/linux-4.1.27/arch/sh/kernel/vsyscall/ |
H A D | vsyscall.lds.S | 22 .hash : { *(.hash) } :text 23 .gnu.hash : { *(.gnu.hash) }
|
/linux-4.1.27/arch/tile/kernel/vdso/ |
H A D | vdso.lds.S | 28 .hash : { *(.hash) } :text 29 .gnu.hash : { *(.gnu.hash) }
|
/linux-4.1.27/drivers/target/tcm_fc/ |
H A D | tfc_sess.c | 30 #include <linux/hash.h> 83 INIT_HLIST_HEAD(&tport->hash[i]); ft_tport_get() 166 * Sessions and hash lists are RCU-protected. 180 head = &tport->hash[ft_sess_hash(port_id)]; hlist_for_each_entry_rcu() 181 hlist_for_each_entry_rcu(sess, head, hash) { hlist_for_each_entry_rcu() 196 * Allocate session and enter it in the hash for the local port. 205 head = &tport->hash[ft_sess_hash(port_id)]; ft_sess_create() 206 hlist_for_each_entry_rcu(sess, head, hash) ft_sess_create() 225 hlist_add_head_rcu(&sess->hash, head); ft_sess_create() 243 hlist_del_rcu(&sess->hash); ft_sess_unhash() 251 * Delete session from hash. 259 head = &tport->hash[ft_sess_hash(port_id)]; hlist_for_each_entry_rcu() 260 hlist_for_each_entry_rcu(sess, head, hash) { hlist_for_each_entry_rcu() 278 for (head = tport->hash; ft_sess_delete_all() 279 head < &tport->hash[FT_SESS_HASH_SIZE]; head++) { hlist_for_each_entry_rcu() 280 hlist_for_each_entry_rcu(sess, head, hash) { hlist_for_each_entry_rcu()
|
H A D | tcm_fc.h | 38 u32 port_id; /* for hash lookup use only */ 44 struct hlist_node hash; /* linkage in ft_sess_hash table */ member in struct:ft_sess 46 struct kref kref; /* ref for hash and outstanding I/Os */ 66 u32 sess_count; /* number of sessions in hash */ 68 struct hlist_head hash[FT_SESS_HASH_SIZE]; /* list of sessions */ member in struct:ft_tport
|
/linux-4.1.27/arch/s390/kernel/vdso32/ |
H A D | vdso32.lds.S | 15 .hash : { *(.hash) } :text 16 .gnu.hash : { *(.gnu.hash) }
|
/linux-4.1.27/arch/s390/kernel/vdso64/ |
H A D | vdso64.lds.S | 15 .hash : { *(.hash) } :text 16 .gnu.hash : { *(.gnu.hash) }
|
/linux-4.1.27/arch/arm64/kernel/vdso/ |
H A D | vdso.lds.S | 34 .hash : { *(.hash) } :text 35 .gnu.hash : { *(.gnu.hash) }
|
/linux-4.1.27/arch/ia64/kernel/ |
H A D | gate.lds.S | 15 .hash : { *(.hash) } :readable 16 .gnu.hash : { *(.gnu.hash) }
|
/linux-4.1.27/net/netfilter/ipset/ |
H A D | Makefile | 15 # hash types
|
/linux-4.1.27/net/xfrm/ |
H A D | xfrm_hash.c | 1 /* xfrm_hash.c: Common hash table code.
|
/linux-4.1.27/include/uapi/linux/netfilter_bridge/ |
H A D | ebt_among.h | 11 * Write-once-read-many hash table, used for checking if a given 15 * The hash value of an address is its last byte. 24 * hash table, which are stored also in form of pairs of integers
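Because "the hash value of an address is its last byte", lookup in this write-once table is a direct index: MAC low bytes are close to uniformly distributed, so the 256-way split costs no computation at all. As a one-line illustration:

    /* bucket for a 6-byte MAC under the scheme described above */
    static unsigned int among_hash(const unsigned char mac[6])
    {
        return mac[5];                     /* last byte selects one of 256 chains */
    }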
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | pte-hash32.h | 6 * The "classic" 32-bit implementation of the PowerPC MMU uses a hash 10 * We use the hash table as an extended TLB, i.e. a cache of currently 15 * tree and putting them into the hash table when necessary, and
|
H A D | pgtable.h | 125 * the hash bits instead (ie, same as the non-SMP case) __set_pte_at() 137 * in the hash code, to pre-invalidate if the PTE was already hashed, __set_pte_at() 140 * the hash bits __set_pte_at() 159 /* Third case is 32-bit hash table in UP mode, we need to preserve __set_pte_at() 161 * translation in the hash yet (done in a subsequent flush_tlb_xxx()) __set_pte_at() 169 * cases, and 32-bit non-hash with 32-bit PTEs. __set_pte_at() 237 * On machines which use an MMU hash table, we use this to put a 238 * corresponding HPTE into the hash table ahead of time, instead of 239 * waiting for the inevitable extra hash-table miss exception.
|
/linux-4.1.27/kernel/trace/ |
H A D | ftrace.c | 33 #include <linux/hash.h> 59 /* hash bits for specific function selection */ 514 struct hlist_head *hash; member in struct:ftrace_profile_stat 686 memset(stat->hash, 0, ftrace_profile_reset() 753 if (stat->hash) { ftrace_profile_init_cpu() 761 * functions are hit. We'll make a hash of 1024 items. ftrace_profile_init_cpu() 765 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); ftrace_profile_init_cpu() 767 if (!stat->hash) ftrace_profile_init_cpu() 772 kfree(stat->hash); ftrace_profile_init_cpu() 773 stat->hash = NULL; ftrace_profile_init_cpu() 803 hhd = &stat->hash[key]; ftrace_find_profiled_func() 822 hlist_add_head_rcu(&rec->node, &stat->hash[key]); ftrace_add_profile() 875 if (!stat->hash || !ftrace_profile_enabled) function_profile_call() 906 if (!stat->hash || !ftrace_profile_enabled) profile_graph_return() 1137 * but they are used as the default "empty hash", to avoid allocating 1210 static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash) ftrace_hash_empty() argument 1212 return !hash || !hash->count; ftrace_hash_empty() 1216 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) ftrace_lookup_ip() argument 1222 if (ftrace_hash_empty(hash)) ftrace_lookup_ip() 1225 if (hash->size_bits > 0) ftrace_lookup_ip() 1226 key = hash_long(ip, hash->size_bits); ftrace_lookup_ip() 1230 hhd = &hash->buckets[key]; ftrace_lookup_ip() 1239 static void __add_hash_entry(struct ftrace_hash *hash, __add_hash_entry() argument 1245 if (hash->size_bits) __add_hash_entry() 1246 key = hash_long(entry->ip, hash->size_bits); __add_hash_entry() 1250 hhd = &hash->buckets[key]; __add_hash_entry() 1252 hash->count++; __add_hash_entry() 1255 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) add_hash_entry() argument 1264 __add_hash_entry(hash, entry); add_hash_entry() 1270 free_hash_entry(struct ftrace_hash *hash, free_hash_entry() argument 1275 hash->count--; free_hash_entry() 1279 remove_hash_entry(struct ftrace_hash *hash, remove_hash_entry() argument 1283 hash->count--; remove_hash_entry() 1286 static void ftrace_hash_clear(struct ftrace_hash *hash) ftrace_hash_clear() argument 1291 int size = 1 << hash->size_bits; ftrace_hash_clear() 1294 if (!hash->count) ftrace_hash_clear() 1298 hhd = &hash->buckets[i]; ftrace_hash_clear() 1300 free_hash_entry(hash, entry); ftrace_hash_clear() 1302 FTRACE_WARN_ON(hash->count); ftrace_hash_clear() 1305 static void free_ftrace_hash(struct ftrace_hash *hash) free_ftrace_hash() argument 1307 if (!hash || hash == EMPTY_HASH) free_ftrace_hash() 1309 ftrace_hash_clear(hash); free_ftrace_hash() 1310 kfree(hash->buckets); free_ftrace_hash() 1311 kfree(hash); free_ftrace_hash() 1316 struct ftrace_hash *hash; __free_ftrace_hash_rcu() local 1318 hash = container_of(rcu, struct ftrace_hash, rcu); __free_ftrace_hash_rcu() 1319 free_ftrace_hash(hash); __free_ftrace_hash_rcu() 1322 static void free_ftrace_hash_rcu(struct ftrace_hash *hash) free_ftrace_hash_rcu() argument 1324 if (!hash || hash == EMPTY_HASH) free_ftrace_hash_rcu() 1326 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); free_ftrace_hash_rcu() 1338 struct ftrace_hash *hash; alloc_ftrace_hash() local 1341 hash = kzalloc(sizeof(*hash), GFP_KERNEL); alloc_ftrace_hash() 1342 if (!hash) alloc_ftrace_hash() 1346 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); alloc_ftrace_hash() 1348 if (!hash->buckets) { alloc_ftrace_hash() 1349 kfree(hash); alloc_ftrace_hash() 1353 hash->size_bits = size_bits; alloc_ftrace_hash() 1355 return hash; 
alloc_ftrace_hash() 1359 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) alloc_and_copy_ftrace_hash() argument 1371 /* Empty hash? */ alloc_and_copy_ftrace_hash() 1372 if (ftrace_hash_empty(hash)) alloc_and_copy_ftrace_hash() 1375 size = 1 << hash->size_bits; alloc_and_copy_ftrace_hash() 1377 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { alloc_and_copy_ftrace_hash() 1384 FTRACE_WARN_ON(new_hash->count != hash->count); alloc_and_copy_ftrace_hash() 1414 /* Reject setting notrace hash on IPMODIFY ftrace_ops */ ftrace_hash_move() 1428 * Make the hash size about 1/2 the # found ftrace_hash_move() 1462 * Remove the current set, update the hash and add 1475 struct ftrace_ops_hash *hash) hash_contains_ip() 1479 * hash and not in the notrace hash. Note, an emty hash is hash_contains_ip() 1480 * considered a match for the filter hash, but an empty hash_contains_ip() 1481 * notrace hash is considered not in the notrace hash. hash_contains_ip() 1483 return (ftrace_hash_empty(hash->filter_hash) || hash_contains_ip() 1484 ftrace_lookup_ip(hash->filter_hash, ip)) && hash_contains_ip() 1485 (ftrace_hash_empty(hash->notrace_hash) || hash_contains_ip() 1486 !ftrace_lookup_ip(hash->notrace_hash, ip)); hash_contains_ip() 1504 struct ftrace_ops_hash hash; ftrace_ops_test() local 1517 hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash); ftrace_ops_test() 1518 hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash); ftrace_ops_test() 1520 if (hash_contains_ip(ip, &hash)) ftrace_ops_test() 1636 struct ftrace_hash *hash; __ftrace_hash_rec_update() local 1650 * Otherwise we just update the items in the hash. __ftrace_hash_rec_update() 1653 * We enable the update in the hash. __ftrace_hash_rec_update() 1659 hash = ops->func_hash->filter_hash; __ftrace_hash_rec_update() 1661 if (ftrace_hash_empty(hash)) __ftrace_hash_rec_update() 1665 hash = ops->func_hash->notrace_hash; __ftrace_hash_rec_update() 1668 * If the notrace hash has no items, __ftrace_hash_rec_update() 1671 if (ftrace_hash_empty(hash)) __ftrace_hash_rec_update() 1683 * Update if the record is not in the notrace hash. do_for_each_ftrace_rec() 1688 in_hash = !!ftrace_lookup_ip(hash, rec->ip); do_for_each_ftrace_rec() 1693 * that are in the hash but not in the other hash. do_for_each_ftrace_rec() 1696 * That means we match anything that is in the hash do_for_each_ftrace_rec() 1698 * off functions in the other hash because they are disabled do_for_each_ftrace_rec() 1699 * by this hash. do_for_each_ftrace_rec() 1773 if (!all && count == hash->count) do_for_each_ftrace_rec() 1801 * If the ops shares the global_ops hash, then we need to update ftrace_hash_rec_update_modify() 1802 * all ops that are enabled and use this hash. ftrace_hash_rec_update_modify() 1830 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected) 1831 * - If the hash is EMPTY_HASH, it hits nothing 1832 * - Anything else hits the recs which match the hash entries. 1851 * allow ftrace_ops to set all functions to new hash. 
__ftrace_hash_update_ipmodify() 1900 struct ftrace_hash *hash = ops->func_hash->filter_hash; ftrace_hash_ipmodify_enable() local 1902 if (ftrace_hash_empty(hash)) ftrace_hash_ipmodify_enable() 1903 hash = NULL; ftrace_hash_ipmodify_enable() 1905 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); ftrace_hash_ipmodify_enable() 1911 struct ftrace_hash *hash = ops->func_hash->filter_hash; ftrace_hash_ipmodify_disable() local 1913 if (ftrace_hash_empty(hash)) ftrace_hash_ipmodify_disable() 1914 hash = NULL; ftrace_hash_ipmodify_disable() 1916 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH); ftrace_hash_ipmodify_disable() 2187 * hash, then it is probably being removed from this do_for_each_ftrace_op() 2195 * in its normal filter hash, then this must be the one do_for_each_ftrace_op() 2771 * But notrace hash requires a test of individual module functions. ops_traces_mod() 2800 /* If in notrace hash, we ignore it too */ ops_references_rec() 2996 struct ftrace_hash *hash; member in struct:ftrace_iterator 3315 * @ops: The ftrace_ops that hold the hash filters 3321 * @ops. Depending on @flag it may process the filter hash or 3322 * the notrace hash of @ops. With this called from the open 3334 struct ftrace_hash *hash; ftrace_regex_open() local 3357 hash = ops->func_hash->notrace_hash; ftrace_regex_open() 3359 hash = ops->func_hash->filter_hash; ftrace_regex_open() 3365 iter->hash = alloc_ftrace_hash(size_bits); ftrace_regex_open() 3367 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); ftrace_regex_open() 3369 if (!iter->hash) { ftrace_regex_open() 3386 free_ftrace_hash(iter->hash); ftrace_regex_open() 3447 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) enter_record() argument 3452 entry = ftrace_lookup_ip(hash, rec->ip); enter_record() 3458 free_hash_entry(hash, entry); enter_record() 3464 ret = add_hash_entry(hash, rec->ip); enter_record() 3492 match_records(struct ftrace_hash *hash, char *buff, match_records() argument 3515 ret = enter_record(hash, rec, not); do_for_each_ftrace_rec() 3530 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) ftrace_match_records() argument 3532 return match_records(hash, buff, len, NULL, 0); ftrace_match_records() 3536 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) ftrace_match_module_records() argument 3550 return match_records(hash, buff, strlen(buff), mod, not); ftrace_match_module_records() 3559 ftrace_mod_callback(struct ftrace_hash *hash, ftrace_mod_callback() argument 3581 ret = ftrace_match_module_records(hash, func, mod); ftrace_mod_callback() 3617 * period. This syncs the hash iteration and freeing of items function_trace_probe_call() 3618 * on the hash. rcu_read_lock is too dangerous here. 
function_trace_probe_call() 3699 struct ftrace_hash *hash; register_ftrace_function_probe() local 3721 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); register_ftrace_function_probe() 3722 if (!hash) { register_ftrace_function_probe() 3764 ret = enter_record(hash, rec, 0); do_for_each_ftrace_rec() 3779 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3792 free_ftrace_hash(hash); 3812 struct ftrace_hash *hash; __unregister_ftrace_function_probe() local 3835 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); __unregister_ftrace_function_probe() 3836 if (!hash) __unregister_ftrace_function_probe() 3862 rec_entry = ftrace_lookup_ip(hash, entry->ip); hlist_for_each_entry_safe() 3865 free_hash_entry(hash, rec_entry); hlist_for_each_entry_safe() 3875 * probe is removed, a null hash means *all enabled*. 3877 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3890 free_ftrace_hash(hash); 3961 static int ftrace_process_regex(struct ftrace_hash *hash, ftrace_process_regex() argument 3971 ret = ftrace_match_records(hash, func, len); ftrace_process_regex() 3986 ret = p->func(hash, func, command, next, enable); ftrace_process_regex() 4016 /* iter->hash is a local copy, so we don't need regex_lock */ ftrace_regex_write() 4023 ret = ftrace_process_regex(iter->hash, parser->buffer, ftrace_regex_write() 4050 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) ftrace_match_addr() argument 4058 entry = ftrace_lookup_ip(hash, ip); ftrace_match_addr() 4061 free_hash_entry(hash, entry); ftrace_match_addr() 4065 return add_hash_entry(hash, ip); ftrace_match_addr() 4106 struct ftrace_hash *hash; ftrace_set_hash() local 4120 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); ftrace_set_hash() 4122 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); ftrace_set_hash() 4124 if (!hash) { ftrace_set_hash() 4129 if (buf && !ftrace_match_records(hash, buf, len)) { ftrace_set_hash() 4134 ret = ftrace_match_addr(hash, ip, remove); ftrace_set_hash() 4143 ret = ftrace_hash_move(ops, enable, orig_hash, hash); ftrace_set_hash() 4153 free_ftrace_hash(hash); ftrace_set_hash() 4374 ftrace_match_records(iter->hash, parser->buffer, parser->idx); ftrace_regex_release() 4394 orig_hash, iter->hash); ftrace_regex_release() 4403 free_ftrace_hash(iter->hash); ftrace_regex_release() 1474 hash_contains_ip(unsigned long ip, struct ftrace_ops_hash *hash) hash_contains_ip() argument
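ftrace_lookup_ip/add_hash_entry above implement the sized-in-bits table used for the filter and notrace sets: 1 << size_bits hlist buckets keyed by hash_long(ip, size_bits), with an empty hash meaning "match everything" on the filter side. The lookup half, reduced to a standalone sketch (types hypothetical; a multiplicative hash stands in for hash_long):

    #include <stddef.h>

    struct fhash_entry {
        struct fhash_entry *next;
        unsigned long ip;
    };

    struct fhash {
        unsigned int size_bits;            /* 1 << size_bits buckets */
        unsigned int count;
        struct fhash_entry **buckets;
    };

    static struct fhash_entry *fhash_lookup(const struct fhash *hash,
                                            unsigned long ip)
    {
        struct fhash_entry *e;
        unsigned long key = 0;

        if (!hash || !hash->count)         /* empty: caller defines the semantics */
            return NULL;

        if (hash->size_bits)               /* stand-in for hash_long() */
            key = (ip * 0x61C8864680B583EBull) >> (64 - hash->size_bits);
        for (e = hash->buckets[key]; e; e = e->next)
            if (e->ip == ip)
                return e;
        return NULL;
    }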
|
/linux-4.1.27/fs/freevxfs/ |
H A D | vxfs_dir.h | 45 * a hash for speeding up directory search (lookup). 47 * The hash may be empty and in fact we do not use it all in the 52 u_int16_t d_nhash; /* no of hash chains */ 53 u_int16_t d_hash[1]; /* hash chain */ 69 u_int16_t d_hashnext; /* next hash entry */
|
/linux-4.1.27/net/sunrpc/ |
H A D | svcauth.c | 19 #include <linux/hash.h> 113 * 'auth_domains' are stored in a hash table indexed by name. 131 hlist_del(&dom->hash); auth_domain_put() 148 hlist_for_each_entry(hp, head, hash) { hlist_for_each_entry() 156 hlist_add_head(&new->hash, head);
|
/linux-4.1.27/Documentation/vDSO/ |
H A D | parse_vdso.c | 134 ELF(Word) *hash = 0; vdso_init_from_sysinfo_ehdr() 152 hash = (ELF(Word) *) vdso_init_from_sysinfo_ehdr() 168 if (!vdso_info.symstrings || !vdso_info.symtab || !hash) vdso_init_from_sysinfo_ehdr() 174 /* Parse the hash table header. */ vdso_init_from_sysinfo_ehdr() 175 vdso_info.nbucket = hash[0]; vdso_init_from_sysinfo_ehdr() 176 vdso_info.nchain = hash[1]; vdso_init_from_sysinfo_ehdr() 177 vdso_info.bucket = &hash[2]; vdso_init_from_sysinfo_ehdr() 178 vdso_info.chain = &hash[vdso_info.nbucket + 2]; vdso_init_from_sysinfo_ehdr() 185 const char *name, ELF(Word) hash) ELF() 189 * ver matches name (which hashes to hash). ELF() 218 return def->vd_hash == hash ELF()
|
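parse_vdso.c above walks a classic SysV ELF hash section: hash[0] is nbucket, hash[1] is nchain, followed by the bucket and chain arrays. A self-contained sketch of the lookup it performs; elf_hash() is the standard SysV string hash, and the symbol table is reduced here to an array of name offsets to stay compact:

#include <string.h>

typedef unsigned int Elf_Word;

static unsigned long elf_hash(const char *name)
{
    unsigned long h = 0, g;

    while (*name) {
        h = (h << 4) + (unsigned char)*name++;
        g = h & 0xf0000000;
        if (g)
            h ^= g >> 24;
        h &= ~g;
    }
    return h;
}

/* Start at bucket[hash % nbucket], then follow the chain array until the
 * symbol's name matches; returns the symbol index, or 0 (STN_UNDEF). */
static Elf_Word lookup(const Elf_Word *bucket, const Elf_Word *chain,
                       Elf_Word nbucket, const char *strtab,
                       const Elf_Word *sym_name_offs, const char *name)
{
    Elf_Word i;

    for (i = bucket[elf_hash(name) % nbucket]; i; i = chain[i])
        if (!strcmp(&strtab[sym_name_offs[i]], name))
            return i;
    return 0;
}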
/linux-4.1.27/drivers/staging/lustre/lustre/fld/ |
H A D | lproc_fld.c | 95 struct lu_fld_hash *hash = NULL; fld_proc_hash_seq_write() local 113 hash = &fld_hash[i]; fld_proc_hash_seq_write() 118 if (hash != NULL) { fld_proc_hash_seq_write() 120 fld->lcf_hash = hash; fld_proc_hash_seq_write() 123 CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n", fld_proc_hash_seq_write() 124 fld->lcf_name, hash->fh_name); fld_proc_hash_seq_write() 169 { "hash", &fld_proc_hash_fops },
|
H A D | fld_request.c | 122 int hash; fld_rrb_scan() local 126 * hash again, and also if other MDTs are not connected, fld_rrb_scan() 130 hash = fld_rrb_hash(fld, seq); fld_rrb_scan() 132 hash = 0; fld_rrb_scan() 136 if (target->ft_idx == hash) fld_rrb_scan() 140 if (hash != 0) { fld_rrb_scan() 144 hash = 0; fld_rrb_scan() 148 CERROR("%s: Can't find target by hash %d (seq %#llx). Targets (%d):\n", fld_rrb_scan() 149 fld->lcf_name, hash, seq, fld->lcf_count); fld_rrb_scan() 333 static inline int hash_is_sane(int hash) hash_is_sane() argument 335 return (hash >= 0 && hash < ARRAY_SIZE(fld_hash)); hash_is_sane() 339 const char *prefix, int hash) fld_client_init() 349 if (!hash_is_sane(hash)) { fld_client_init() 350 CERROR("%s: Wrong hash function %#x\n", fld_client_init() 351 fld->lcf_name, hash); fld_client_init() 357 fld->lcf_hash = &fld_hash[hash]; fld_client_init() 382 CDEBUG(D_INFO, "%s: Using \"%s\" hash\n", fld_client_init() 338 fld_client_init(struct lu_client_fld *fld, const char *prefix, int hash) fld_client_init() argument
|
/linux-4.1.27/drivers/net/ethernet/ti/ |
H A D | tlan.h | 517 * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), 519 * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), 521 * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), 523 * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), 525 * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), 527 * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), 533 u8 hash; tlan_hash_func() local 535 hash = (a[0]^a[3]); /* & 077 */ tlan_hash_func() 536 hash ^= ((a[0]^a[3])>>6); /* & 003 */ tlan_hash_func() 537 hash ^= ((a[1]^a[4])<<2); /* & 074 */ tlan_hash_func() 538 hash ^= ((a[1]^a[4])>>4); /* & 017 */ tlan_hash_func() 539 hash ^= ((a[2]^a[5])<<4); /* & 060 */ tlan_hash_func() 540 hash ^= ((a[2]^a[5])>>2); /* & 077 */ tlan_hash_func() 542 return hash & 077; tlan_hash_func()
|
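The XOR8 comment block and the folded byte arithmetic of tlan_hash_func() above are two spellings of the same 6-bit multicast hash. A small user-space check of that equivalence, assuming LSB-first bit numbering within each address byte (the test address is arbitrary):

#include <stdint.h>
#include <stdio.h>

static int da_bit(const uint8_t *a, int n)          /* bit n of the address */
{
    return (a[n / 8] >> (n % 8)) & 1;
}

static uint8_t hash_by_formula(const uint8_t *a)    /* the XOR8 comment */
{
    uint8_t hash = 0;

    for (int bit = 0; bit < 6; bit++) {
        int x = 0;

        for (int k = bit; k < 48; k += 6)           /* DA(a,bit), DA(a,bit+6), ... */
            x ^= da_bit(a, k);
        hash |= (uint8_t)(x << bit);
    }
    return hash;
}

static uint8_t hash_by_bytes(const uint8_t *a)      /* tlan_hash_func(), folded */
{
    uint8_t hash;

    hash  = a[0] ^ a[3];
    hash ^= (a[0] ^ a[3]) >> 6;
    hash ^= (a[1] ^ a[4]) << 2;
    hash ^= (a[1] ^ a[4]) >> 4;
    hash ^= (a[2] ^ a[5]) << 4;
    hash ^= (a[2] ^ a[5]) >> 2;
    return hash & 077;
}

int main(void)
{
    const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x12, 0x34, 0x56 };

    printf("formula %02o bytes %02o\n", hash_by_formula(mac), hash_by_bytes(mac));
    return 0;
}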
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/ |
H A D | capa.c | 63 /* lock for capa hash/capa_list/fo_capa_keys */ 87 struct hlist_head *hash; init_capa_hash() local 90 OBD_ALLOC(hash, PAGE_CACHE_SIZE); init_capa_hash() 91 if (!hash) init_capa_hash() 98 INIT_HLIST_HEAD(hash + i); init_capa_hash() 99 return hash; init_capa_hash() 118 void cleanup_capa_hash(struct hlist_head *hash) cleanup_capa_hash() argument 126 hlist_for_each_entry_safe(oc, next, hash + i, cleanup_capa_hash() 132 OBD_FREE(hash, PAGE_CACHE_SIZE); cleanup_capa_hash() 193 struct obd_capa *capa_add(struct hlist_head *hash, struct lustre_capa *capa) capa_add() argument 195 struct hlist_head *head = hash + capa_hashfn(&capa->lc_fid); capa_add() 224 struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa, capa_lookup() argument 230 ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive); capa_lookup() 253 CERROR("failed to hash setkey: %d\n", rv); ll_crypto_hmac()
|
/linux-4.1.27/scripts/basic/ |
H A D | fixdep.c | 144 unsigned int hash; member in struct:item 153 /* fnv32 hash */ strhash() 154 unsigned int i, hash = 2166136261U; strhash() local 157 hash = (hash ^ str[i]) * 0x01000193; strhash() 158 return hash; strhash() 164 static int is_defined_config(const char *name, int len, unsigned int hash) is_defined_config() argument 168 for (aux = hashtab[hash % HASHSZ]; aux; aux = aux->next) { is_defined_config() 169 if (aux->hash == hash && aux->len == len && is_defined_config() 179 static void define_config(const char *name, int len, unsigned int hash) define_config() argument 189 aux->hash = hash; define_config() 190 aux->next = hashtab[hash % HASHSZ]; define_config() 191 hashtab[hash % HASHSZ] = aux; define_config() 216 unsigned int hash = strhash(m, slen); use_config() local 219 if (is_defined_config(m, slen, hash)) use_config() 222 define_config(m, slen, hash); use_config()
|
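fixdep's strhash() above is 32-bit FNV-1a: start from the offset basis 2166136261, and for each byte xor then multiply by the FNV prime 0x01000193. A standalone version with the same bucket reduction that is_defined_config() applies; the HASHSZ value here is an assumption, not necessarily fixdep's:

#include <stdio.h>
#include <string.h>

#define HASHSZ 256   /* hypothetical table size */

static unsigned int strhash(const char *str, unsigned int sz)
{
    unsigned int i, hash = 2166136261U;         /* FNV-1a offset basis */

    for (i = 0; i < sz; i++)
        hash = (hash ^ (unsigned char)str[i]) * 0x01000193;  /* FNV prime */
    return hash;
}

int main(void)
{
    const char *name = "CONFIG_SMP";
    unsigned int h = strhash(name, strlen(name));

    printf("%s -> %#x (bucket %u)\n", name, h, h % HASHSZ);
    return 0;
}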
/linux-4.1.27/drivers/gpu/drm/ |
H A D | drm_auth.c | 51 * Searches in drm_device::magiclist within all files with the same hash key 59 struct drm_hash_item *hash; drm_find_file() local 63 if (!drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { drm_find_file() 64 pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); drm_find_file() 79 * associated the magic number hash key in drm_device::magiclist, while holding 109 * number hash key, while holding the drm_device::struct_mutex lock. 114 struct drm_hash_item *hash; drm_remove_magic() local 120 if (drm_ht_find_item(&master->magiclist, (unsigned long)magic, &hash)) { drm_remove_magic() 124 pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); drm_remove_magic() 125 drm_ht_remove_item(&master->magiclist, hash); drm_remove_magic()
|
/linux-4.1.27/fs/nfs/ |
H A D | pnfs_dev.c | 80 long hash) _lookup_deviceid() 84 hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node) _lookup_deviceid() 173 const struct nfs4_deviceid *id, long hash) __nfs4_find_get_deviceid() 179 hash); __nfs4_find_get_deviceid() 191 long hash = nfs4_deviceid_hash(id); nfs4_find_get_deviceid() local 194 d = __nfs4_find_get_deviceid(server, id, hash); nfs4_find_get_deviceid() 203 d = __nfs4_find_get_deviceid(server, id, hash); nfs4_find_get_deviceid() 209 hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]); nfs4_find_get_deviceid() 312 _deviceid_purge_client(const struct nfs_client *clp, long hash) _deviceid_purge_client() argument 319 hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node) _deviceid_purge_client() 78 _lookup_deviceid(const struct pnfs_layoutdriver_type *ld, const struct nfs_client *clp, const struct nfs4_deviceid *id, long hash) _lookup_deviceid() argument 172 __nfs4_find_get_deviceid(struct nfs_server *server, const struct nfs4_deviceid *id, long hash) __nfs4_find_get_deviceid() argument
|
/linux-4.1.27/include/linux/sunrpc/ |
H A D | svcauth.h | 18 #include <linux/hash.h> 70 struct hlist_node hash; member in struct:auth_domain 163 unsigned long hash = 0; hash_str() local 174 hash = hash_long(hash^l, BITS_PER_LONG); hash_str() 176 return hash >> (BITS_PER_LONG - bits); hash_str() 181 unsigned long hash = 0; hash_mem() local 193 hash = hash_long(hash^l, BITS_PER_LONG); hash_mem() 195 return hash >> (BITS_PER_LONG - bits); hash_mem()
|
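hash_str()/hash_mem() above pack input bytes into an unsigned long, mix each full word with hash_long(), and keep the top `bits` bits of the result as the bucket index. A one-step sketch of the 64-bit mixing as <linux/hash.h> of this era does it (multiplicative hashing by GOLDEN_RATIO_PRIME_64; the kernel folds word by word and shifts once at the end):

#include <stdint.h>

#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

static uint64_t hash_long64(uint64_t val, unsigned int bits)
{
    /* after a multiplicative hash the useful entropy sits in the top bits,
     * so the bucket index is taken from there */
    return (val * GOLDEN_RATIO_PRIME_64) >> (64 - bits);
}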
H A D | cache.h | 30 * - A function to calculate a hash of an item's key. 33 * (e.g. hash size, goal_age, etc). 39 * in the hash table. 143 struct hlist_node hash; /* on hash chain */ member in struct:cache_deferred_req 177 struct cache_head *key, int hash); 180 struct cache_head *new, struct cache_head *old, int hash);
|
/linux-4.1.27/kernel/ |
H A D | module_signing.c | 15 #include <crypto/hash.h> 32 u8 hash; /* Digest algorithm [enum hash_algo] */ member in struct:module_signature 43 static struct public_key_signature *mod_make_digest(enum hash_algo hash, mod_make_digest() argument 56 * big the hash operational data will be. mod_make_digest() 58 tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0); mod_make_digest() 65 /* We allocate the hash operational data storage on the end of our mod_make_digest() 73 pks->pkey_hash_algo = hash; mod_make_digest() 220 if (ms.hash >= PKEY_HASH__LAST || mod_verify_sig() 221 !hash_algo_name[ms.hash]) mod_verify_sig() 229 pks = mod_make_digest(ms.hash, mod, modlen); mod_verify_sig()
|
H A D | workqueue_internal.h | 17 * either serving the manager role, on idle list or on busy hash. For 23 /* on idle list while idle, on busy hash table while busy */
|
/linux-4.1.27/drivers/crypto/ux500/hash/ |
H A D | hash_core.c | 31 #include <crypto/internal/hash.h> 91 * @device_data: Structure for the hash device. 103 * release_hash_device - Releases a previously allocated hash device. 104 * @device_data: Structure for the hash device. 226 * @device_data: Structure for the hash device. 281 "%s: Continue hash calculation, since hmac key available\n", get_empty_message_digest() 292 * @device_data: Structure for the hash device. 329 * @device_data: Structure for the hash device. 372 * hash_get_device_data - Checks for an available hash device and returns it. 373 * @hash_ctx: Structure for the hash context. 374 * @device_data: Structure for the hash device. 376 * This function checks for an available hash device and returns it to 433 * @device_data: Structure for the hash device. 478 * init_hash_hw - Initialise the hash hardware for a new calculation. 479 * @device_data: Structure for the hash device. 480 * @ctx: The hash context. 559 * hash_init - Common hash init function for SHA1/SHA2 (SHA256). 560 * @req: The hash request for the job. 599 * @device_data: Structure for the hash device. 621 * @device_data: Structure for the hash device. 635 * Clear hash str register, only clear NBLW hash_messagepad() 685 * hash_setconfiguration - Sets the required configuration for the hash 687 * @device_data: Structure for the hash device. 743 } else { /* Wrong hash mode */ hash_setconfiguration() 752 * hash_begin - This routine resets some globals and initializes the hash 754 * @device_data: Structure for the hash device. 865 * hash_dma_final - The hash dma final function for SHA1/SHA256. 866 * @req: The hash request for the job. 971 * hash_hw_final - The final hash calculation function 972 * @req: The hash request for the job. 1137 * @device_state: The state to be restored in the hash hardware 1304 * hash_update - The hash update function for SHA1/SHA2 (SHA256). 1305 * @req: The hash request for the job. 1324 * hash_final - The hash final function for SHA1/SHA2 (SHA256). 1325 * @req: The hash request for the job. 1492 struct ahash_alg hash; member in struct:hash_algo_template 1503 hash); hash_cra_init() 1512 ctx->digestsize = hash_alg->hash.halg.digestsize; hash_cra_init() 1521 .hash = { 1543 .hash = { 1566 .hash = { 1590 .hash = { 1623 ret = crypto_register_ahash(&hash_algs[i].hash); ahash_algs_register_all() 1627 hash_algs[i].hash.halg.base.cra_driver_name); ahash_algs_register_all() 1634 crypto_unregister_ahash(&hash_algs[i].hash); ahash_algs_register_all() 1646 crypto_unregister_ahash(&hash_algs[i].hash); ahash_algs_unregister_all() 1650 * ux500_hash_probe - Function that probes the hash hardware. 1774 * ux500_hash_remove - Function that removes the hash device from the platform. 1834 * ux500_hash_shutdown - Function that shuts down the hash device. 1886 * ux500_hash_suspend - Function that suspends the hash device. 1923 * ux500_hash_resume - Function that resumes the hash device. 1958 { .compatible = "stericsson,ux500-hash" },
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
H A D | lustre_lite.h | 123 * Chain of hash overflow pages. 137 static inline unsigned long hash_x_index(__u64 hash, int hash64) hash_x_index() argument 140 hash >>= 32; hash_x_index() 141 /* save hash 0 as index 0 because otherwise we'll save it at hash_x_index() 145 return ~0UL - (hash + !hash); hash_x_index()
|
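hash_x_index() above stores directory hashes as page indexes counted down from ~0UL, so increasing hash values map to decreasing indexes and index ~0UL itself is never used. A standalone sketch showing the arithmetic, including the deliberate fold of hash 0 onto hash 1's slot via the `+ !hash` term:

#include <stdio.h>

static unsigned long hash_x_index(unsigned long long hash, int hash64)
{
    if (sizeof(unsigned long) == 4 && hash64)
        hash >>= 32;                 /* 32-bit kernels keep the high half */
    /* hash 0 and hash 1 share an index; ~0UL stays free */
    return ~0UL - (hash + !hash);
}

int main(void)
{
    printf("%lx %lx %lx\n",
           hash_x_index(0, 0),       /* same as hash 1 */
           hash_x_index(1, 0),
           hash_x_index(2, 0));
    return 0;
}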
/linux-4.1.27/fs/hfs/ |
H A D | string.c | 57 unsigned int hash, len = this->len; hfs_hash_dentry() local 62 hash = init_name_hash(); hfs_hash_dentry() 64 hash = partial_name_hash(caseorder[*name++], hash); hfs_hash_dentry() 65 this->hash = end_name_hash(hash); hfs_hash_dentry()
|
/linux-4.1.27/security/tomoyo/ |
H A D | memory.c | 7 #include <linux/hash.h> 150 unsigned int hash; tomoyo_get_name() local 157 hash = full_name_hash((const unsigned char *) name, len - 1); tomoyo_get_name() 158 head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)]; tomoyo_get_name() 162 if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) || list_for_each_entry()
|
/linux-4.1.27/fs/adfs/ |
H A D | dir.c | 198 unsigned long hash; adfs_hash() local 210 hash = init_name_hash(); adfs_hash() 218 hash = partial_name_hash(c, hash); adfs_hash() 220 qstr->hash = end_name_hash(hash); adfs_hash()
|
/linux-4.1.27/fs/xfs/libxfs/ |
H A D | xfs_attr_sf.h | 47 * We generate this then sort it, attr_list() must return things in hash-order. 54 xfs_dahash_t hash; /* this entry's hash value */ member in struct:xfs_attr_sf_sort
|
/linux-4.1.27/arch/arm64/crypto/ |
H A D | sha1-ce-glue.c | 2 * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions 13 #include <crypto/internal/hash.h> 23 MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
|
/linux-4.1.27/drivers/crypto/ |
H A D | Makefile | 9 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
|
/linux-4.1.27/arch/powerpc/kernel/vdso32/ |
H A D | vdso32.lds.S | 19 .hash : { *(.hash) } :text 20 .gnu.hash : { *(.gnu.hash) }
|
/linux-4.1.27/arch/powerpc/kernel/vdso64/ |
H A D | vdso64.lds.S | 19 .hash : { *(.hash) } :text 20 .gnu.hash : { *(.gnu.hash) }
|
/linux-4.1.27/fs/dlm/ |
H A D | dir.h | 18 int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash);
|
H A D | dir.c | 27 * We use the upper 16 bits of the hash value to select the directory node. 28 * Low bits are used for distribution of rsb's among hash buckets on each node. 31 * num_nodes to the hash value. This value in the desired range is used as an 35 int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash) dlm_hash2nodeid() argument 42 node = (hash >> 16) % ls->ls_total_weight; dlm_hash2nodeid() 203 uint32_t hash, bucket; find_rsb_root() local 206 hash = jhash(name, len, 0); find_rsb_root() 207 bucket = hash & (ls->ls_rsbtbl_size - 1); find_rsb_root()
|
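dlm_hash2nodeid() above reduces the upper 16 bits of the rsb name hash onto the total node weight; the real code then walks the node list comparing accumulated weights to turn that slot into a nodeid. A sketch of the selection step only, with hypothetical weights:

#include <stdint.h>
#include <stdio.h>

static int hash2slot(uint32_t hash, int total_weight)
{
    return (hash >> 16) % total_weight;   /* upper bits pick the dir node */
}

int main(void)
{
    /* hypothetical 3-node lockspace with equal weight 1 each */
    uint32_t h = 0xdeadbeef;

    printf("hash %#x -> weighted slot %d of 3\n", h, hash2slot(h, 3));
    return 0;
}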
/linux-4.1.27/fs/jbd/ |
H A D | revoke.c | 64 * We keep two hash tables of revoke records. One hashtable belongs to the 66 * belongs to the committing transaction. Accesses to the second hash table 73 * All users operating on the hash table belonging to the running transaction 75 * switching hash tables under them. For operations on the lists of entries in 76 * the hash table j_revoke_lock is used. 78 * Finally, the replay code also uses the hash tables, but at this moment no one else 96 #include <linux/hash.h> 107 struct list_head hash; member in struct:jbd_revoke_record_s 113 /* The revoke table is just a simple hash table of revoke records. */ 116 /* It is conceivable that we might want a larger hash table 133 static inline int hash(journal_t *journal, unsigned int block) hash() function 153 hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)]; insert_revoke_hash() 155 list_add(&record->hash, hash_list); insert_revoke_hash() 167 /* Find a revoke record in the journal's hash table. */ 175 hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)]; find_revoke_record() 179 while (&(record->hash) != hash_list) { find_revoke_record() 184 record = (struct jbd_revoke_record_s *) record->hash.next; find_revoke_record() 326 * the hash tables without an attached journal_head. 356 BUFFER_TRACE(bh, "found on hash"); journal_revoke() 450 list_del(&record->hash); journal_cancel_revoke() 528 * revoke hash, deleting the entries as we go. 557 list_del(&record->hash); journal_write_revoke_records() 729 list_del(&record->hash); journal_clear_revoke()
|
/linux-4.1.27/fs/jbd2/ |
H A D | revoke.c | 64 * We keep two hash tables of revoke records. One hashtable belongs to the 66 * belongs to the committing transaction. Accesses to the second hash table 73 * All users operating on the hash table belonging to the running transaction 75 * switching hash tables under them. For operations on the lists of entries in 76 * the hash table j_revoke_lock is used. 78 * Finally, the replay code also uses the hash tables, but at this moment no one else 95 #include <linux/hash.h> 107 struct list_head hash; member in struct:jbd2_revoke_record_s 113 /* The revoke table is just a simple hash table of revoke records. */ 116 /* It is conceivable that we might want a larger hash table 134 static inline int hash(journal_t *journal, unsigned long long block) hash() function 152 hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)]; insert_revoke_hash() 154 list_add(&record->hash, hash_list); insert_revoke_hash() 166 /* Find a revoke record in the journal's hash table. */ 174 hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)]; find_revoke_record() 178 while (&(record->hash) != hash_list) { find_revoke_record() 183 record = (struct jbd2_revoke_record_s *) record->hash.next; find_revoke_record() 323 * the hash tables without an attached journal_head. 353 BUFFER_TRACE(bh, "found on hash"); jbd2_journal_revoke() 447 list_del(&record->hash); jbd2_journal_cancel_revoke() 525 * revoke hash, deleting the entries as we go. 556 list_del(&record->hash); jbd2_journal_write_revoke_records() 760 list_del(&record->hash); jbd2_journal_clear_revoke()
|
/linux-4.1.27/fs/nfsd/ |
H A D | export.c | 7 * creates a client control block and adds it to the hash 251 int hash = item->ek_fsidtype; svc_expkey_hash() local 255 hash ^= hash_mem(cp, len, EXPKEY_HASHBITS); svc_expkey_hash() 256 hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS); svc_expkey_hash() 257 hash &= EXPKEY_HASHMASK; svc_expkey_hash() 258 return hash; svc_expkey_hash() 265 int hash = svc_expkey_hash(item); svc_expkey_lookup() local 267 ch = sunrpc_cache_lookup(cd, &item->h, hash); svc_expkey_lookup() 279 int hash = svc_expkey_hash(new); svc_expkey_update() local 281 ch = sunrpc_cache_update(cd, &new->h, &old->h, hash); svc_expkey_update() 767 int hash; svc_export_hash() local 769 hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS); svc_export_hash() 770 hash ^= hash_ptr(exp->ex_path.dentry, EXPORT_HASHBITS); svc_export_hash() 771 hash ^= hash_ptr(exp->ex_path.mnt, EXPORT_HASHBITS); svc_export_hash() 772 return hash; svc_export_hash() 779 int hash = svc_export_hash(exp); svc_export_lookup() local 781 ch = sunrpc_cache_lookup(exp->cd, &exp->h, hash); svc_export_lookup() 792 int hash = svc_export_hash(old); svc_export_update() local 794 ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash); svc_export_update() 1084 unsigned hash, export; __acquires() local 1092 hash = n >> 32; __acquires() 1096 for (ch=export_table[hash]; ch; ch=ch->next) __acquires() 1101 hash++; __acquires() 1103 } while(hash < EXPORT_HASHMAX && export_table[hash]==NULL); __acquires() 1104 if (hash >= EXPORT_HASHMAX) __acquires() 1107 return export_table[hash]; __acquires() 1113 int hash = (*pos >> 32); e_next() local 1118 hash = 0; e_next() 1120 hash++; e_next() 1127 while (hash < EXPORT_HASHMAX && export_table[hash] == NULL) { e_next() 1128 hash++; e_next() 1131 if (hash >= EXPORT_HASHMAX) e_next() 1134 return export_table[hash]; e_next()
|
/linux-4.1.27/fs/affs/ |
H A D | namei.c | 67 unsigned long hash; __affs_hash_dentry() local 75 hash = init_name_hash(); __affs_hash_dentry() 78 hash = partial_name_hash(toupper(*name), hash); __affs_hash_dentry() 79 qstr->hash = end_name_hash(hash); __affs_hash_dentry() 178 u32 hash; affs_hash_name() local 180 hash = len = min(len, AFFSNAMEMAX); affs_hash_name() 182 hash = (hash * 13 + toupper(*name++)) & 0x7ff; affs_hash_name() 184 return hash % AFFS_SB(sb)->s_hashsize; affs_hash_name()
|
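affs_hash_name() above is a short multiplicative, case-folding hash reduced modulo the superblock hash size. A standalone copy; the AFFSNAMEMAX length clamp is omitted, and the hash size 72 is a hypothetical stand-in for AFFS_SB(sb)->s_hashsize:

#include <ctype.h>
#include <stdio.h>

static unsigned int affs_hash_name(const char *name, unsigned int len,
                                   unsigned int hashsize)
{
    unsigned int hash = len;                    /* seeded with the length */

    while (len--)
        hash = (hash * 13 + toupper((unsigned char)*name++)) & 0x7ff;
    return hash % hashsize;
}

int main(void)
{
    printf("%u\n", affs_hash_name("Readme", 6, 72));
    return 0;
}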
/linux-4.1.27/fs/logfs/ |
H A D | dir.c | 91 * Prime value was chosen to be roughly 256 + 26. r5 hash uses 11, 100 u32 hash = seed; hash_32() local 104 hash = hash * 293 + s[i]; hash_32() 105 return hash; hash_32() 111 * indirect blocks. The number of possible locations for a given hash 116 * So we use the following scheme. First we reduce the hash to 0..15 117 * and try a direct block. If that is occupied we reduce the hash to 119 * blocks. Lastly we reduce the hash to 0x800_0000 .. 0xffff_ffff, 122 * Using 16 entries should allow for a reasonable amount of hash 134 static pgoff_t hash_index(u32 hash, int round) hash_index() argument 143 return hash % i0_blocks; hash_index() 145 return i0_blocks + hash % (i1_blocks - i0_blocks); hash_index() 147 return i1_blocks + hash % (i2_blocks - i1_blocks); hash_index() 149 return i2_blocks + hash % (i3_blocks - i2_blocks); hash_index() 151 return i3_blocks + 16 * (hash % (((1<<31) - i3_blocks) / 16)) hash_index() 162 u32 hash = hash_32(name->name, name->len, 0); logfs_get_dd_page() local 170 index = hash_index(hash, round); logfs_get_dd_page() 373 u32 hash = hash_32(dentry->d_name.name, dentry->d_name.len, 0); logfs_write_dir() local 378 index = hash_index(hash, round); logfs_write_dir() 402 * too many collisions for this particular hash and no fallback. logfs_write_dir()
|
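logfs's hash_32() above is a simple multiplicative string hash (multiplier 293, per the "roughly 256 + 26" comment); hash_index() then remaps the same hash into successively larger regions of the directory file on each collision round. A standalone copy of the string hash plus a toy round loop; the region sizes are hypothetical, not logfs's i0..i3 block counts:

#include <stdint.h>
#include <stdio.h>

static uint32_t hash_32(const char *s, int len, uint32_t seed)
{
    uint32_t hash = seed;
    int i;

    for (i = 0; i < len; i++)
        hash = hash * 293 + s[i];
    return hash;
}

int main(void)
{
    /* each retry round searches a larger slice of the directory file */
    const uint32_t region[3] = { 16, 256, 4096 };   /* hypothetical sizes */
    uint32_t h = hash_32("example", 7, 0);
    uint32_t base = 0;

    for (int round = 0; round < 3; round++) {
        printf("round %d -> slot %u\n", round, (unsigned)(base + h % region[round]));
        base += region[round];
    }
    return 0;
}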
/linux-4.1.27/lib/xz/ |
H A D | xz_dec_stream.c | 93 struct xz_dec_hash hash; member in struct:xz_dec::__anon14040 112 * Hash calculated from the Records (matches block.hash in 115 struct xz_dec_hash hash; member in struct:xz_dec::__anon14041 215 * the sizes possibly stored in the Block Header. Update the hash and 259 s->block.hash.unpadded += s->block_header.size dec_block() 263 s->block.hash.unpadded += check_sizes[s->check_type]; dec_block() 266 s->block.hash.unpadded += 4; dec_block() 269 s->block.hash.uncompressed += s->block.uncompressed; dec_block() 270 s->block.hash.crc32 = xz_crc32( dec_block() 271 (const uint8_t *)&s->block.hash, dec_block() 272 sizeof(s->block.hash), s->block.hash.crc32); dec_block() 323 s->index.hash.unpadded += s->vli; dec_index() 328 s->index.hash.uncompressed += s->vli; dec_index() 329 s->index.hash.crc32 = xz_crc32( dec_index() 330 (const uint8_t *)&s->index.hash, dec_index() 331 sizeof(s->index.hash), dec_index() 332 s->index.hash.crc32); dec_index() 684 if (!memeq(&s->block.hash, &s->index.hash, dec_main() 685 sizeof(s->block.hash))) dec_main()
|
/linux-4.1.27/drivers/misc/vmw_vmci/ |
H A D | vmci_resource.c | 17 #include <linux/hash.h> 43 * Gets a resource (if one exists) matching given handle from the hash table. 149 /* Remove resource from hash table. */ vmci_resource_remove() 201 /* Verify the resource has been unlinked from hash table */ vmci_release_resource() 211 * can increment the count again (it's gone from the resource hash
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/ |
H A D | l2t.h | 52 * packets awaiting address resolution. Second, it is a node of a hash table 54 * pointer. Finally, each node is a bucket of a hash table, pointing to the 63 struct l2t_entry *first; /* start of hash chain */ 69 u16 hash; /* hash bucket the entry is on */ member in struct:l2t_entry
|
H A D | clip_tbl.c | 80 int hash; cxgb4_clip_get() local 86 hash = clip_addr_hash(ctbl, addr, v6); cxgb4_clip_get() 89 list_for_each_entry(cte, &ctbl->hash_list[hash], list) { cxgb4_clip_get() 113 list_add_tail(&ce->list, &ctbl->hash_list[hash]); cxgb4_clip_get() 146 int hash; cxgb4_clip_release() local 149 hash = clip_addr_hash(ctbl, addr, v6); cxgb4_clip_release() 152 list_for_each_entry(cte, &ctbl->hash_list[hash], list) { cxgb4_clip_release()
|
/linux-4.1.27/include/trace/events/ |
H A D | net.h | 156 __field( u32, hash ) 182 __entry->hash = skb->hash; 194 TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x", 198 __entry->hash, __entry->l4_hash, __entry->len,
|
/linux-4.1.27/scripts/ |
H A D | sign-file | 7 "Usage: scripts/sign-file [-v] <hash algo> <key> <x509> <module> [<dest>]\n" . 8 " scripts/sign-file [-v] -s <raw sig> <hash algo> <x509> <module> [<dest>]\n"; 312 my $hash = 0; # Digest algorithm 324 $hash = 2; 330 $hash = 7; 336 $hash = 4; 342 $hash = 5; 348 $hash = 6; 350 die "Unknown hash algorithm: $dgst\n"; 391 $algo, $hash, $id_type,
|
/linux-4.1.27/fs/ |
H A D | mbcache.c | 19 * in the cache. A valid entry is in the main hash tables of the cache, 32 * Each hash chain of both the block and index hash tables now contains 33 * a built-in lock used to serialize accesses to the hash chain. 43 * Each block hash chain's lock has the highest lock order, followed by an 44 * index hash chain's lock, mb_cache_bg_lock (used to implement mb_cache_entry's 46 * either a block or index hash chain lock, a thread can acquire an 52 * index hash chain, it needs to lock the corresponding hash chain. For each 59 * block hash chain and also no longer being referenced, both e_used, 61 * first removed from a block hash chain. 67 #include <linux/hash.h> 176 * This function is called to unhash both the block and index hash 178 * It assumes both the block and index hash chains are locked upon entry. 179 * It also unlocks both hash chains on exit 322 * @bucket_bits: log2(number of hash buckets) 366 * Set an upper limit on the number of cache entries so that the hash mb_cache_create() 672 /* First serialize access to the corresponding block hash chain. */ mb_cache_entry_get() 723 /* The index hash chain is already acquired by the caller. */ __mb_cache_entry_find()
|
/linux-4.1.27/net/openvswitch/ |
H A D | flow_table.c | 316 static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash) find_bucket() argument 318 hash = jhash_1word(hash, ti->hash_seed); find_bucket() 320 (hash & (ti->n_buckets - 1))); find_bucket() 328 head = find_bucket(ti, flow->flow_table.hash); table_instance_insert() 337 head = find_bucket(ti, flow->ufid_table.hash); ufid_table_instance_insert() 421 /* Make sure number of hash bytes are multiple of u32. */ flow_hash() 475 u32 hash; masked_flow_lookup() local 479 hash = flow_hash(&masked_key, &mask->range); masked_flow_lookup() 480 head = find_bucket(ti, hash); hlist_for_each_entry_rcu() 482 if (flow->mask == mask && flow->flow_table.hash == hash && hlist_for_each_entry_rcu() 560 u32 hash; ovs_flow_tbl_lookup_ufid() local 562 hash = ufid_hash(ufid); ovs_flow_tbl_lookup_ufid() 563 head = find_bucket(ti, hash); hlist_for_each_entry_rcu() 565 if (flow->ufid_table.hash == hash && hlist_for_each_entry_rcu() 693 flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range); flow_key_insert() 716 flow->ufid_table.hash = ufid_hash(&flow->id); flow_ufid_insert()
|
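find_bucket() above mixes the precomputed flow hash with a per-table-instance seed before masking, so rehashing into a new table instance redistributes entries across buckets. A minimal sketch of that step; jhash_1word() is replaced here by a stand-in mixer, and n_buckets is assumed to be a power of two as in the original:

#include <stdint.h>

static uint32_t mix(uint32_t h, uint32_t seed)   /* stand-in for jhash_1word */
{
    h ^= seed;
    h *= 0x9e3779b1u;          /* golden-ratio multiplier */
    return h ^ (h >> 16);
}

static uint32_t bucket_of(uint32_t hash, uint32_t seed, uint32_t n_buckets)
{
    return mix(hash, seed) & (n_buckets - 1);
}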
/linux-4.1.27/fs/kernfs/ |
H A D | dir.c | 17 #include <linux/hash.h> 180 * @name: Null terminated string to hash 181 * @ns: Namespace tag to hash 183 * Returns 31 bit hash of ns + name (so it fits in an off_t ) 187 unsigned long hash = init_name_hash(); kernfs_name_hash() local 190 hash = partial_name_hash(*name++, hash); kernfs_name_hash() 191 hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31)); kernfs_name_hash() 192 hash &= 0x7fffffffU; kernfs_name_hash() 193 /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */ kernfs_name_hash() 194 if (hash < 2) kernfs_name_hash() 195 hash += 2; kernfs_name_hash() 196 if (hash >= INT_MAX) kernfs_name_hash() 197 hash = INT_MAX - 1; kernfs_name_hash() 198 return hash; kernfs_name_hash() 201 static int kernfs_name_compare(unsigned int hash, const char *name, kernfs_name_compare() argument 204 if (hash < kn->hash) kernfs_name_compare() 206 if (hash > kn->hash) kernfs_name_compare() 218 return kernfs_name_compare(left->hash, left->name, left->ns, right); kernfs_sd_compare() 601 kn->hash = kernfs_name_hash(kn->name, kn->ns); kernfs_add_one() 647 unsigned int hash; kernfs_find_ns() local 657 hash = kernfs_name_hash(name, ns); kernfs_find_ns() 663 result = kernfs_name_compare(hash, name, ns, kn); kernfs_find_ns() 853 /* instantiate and hash dentry */ kernfs_iop_lookup() 1335 kn->hash = kernfs_name_hash(kn->name, kn->ns); kernfs_rename_ns() 1360 struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos) kernfs_dir_pos() 1364 pos->parent == parent && hash == pos->hash; kernfs_dir_pos() 1369 if (!pos && (hash > 1) && (hash < INT_MAX)) { kernfs_dir_pos() 1374 if (hash < pos->hash) kernfs_dir_pos() 1376 else if (hash > pos->hash) kernfs_dir_pos() 1431 ctx->pos = pos->hash; kernfs_fop_readdir() 1359 kernfs_dir_pos(const void *ns, struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos) kernfs_dir_pos() argument
|
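kernfs_name_hash() above doubles as a readdir position, so after masking to 31 bits (to fit an off_t) the values 0, 1 and INT_MAX are kept free for magic directory entries. The clamping step in isolation, with INT_MAX spelled out as 0x7fffffff:

static unsigned int clamp_hash(unsigned int hash)
{
    hash &= 0x7fffffffU;         /* 31 bits so it fits in an off_t */
    if (hash < 2)                /* 0 and 1 stay reserved */
        hash += 2;
    if (hash >= 0x7fffffffU)     /* INT_MAX stays reserved */
        hash = 0x7fffffffU - 1;
    return hash;
}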
/linux-4.1.27/include/net/netfilter/ |
H A D | nf_conntrack.h | 75 /* Usage count in here is 1 for hash table/destruct timer, 1 per skb, 122 nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash) nf_ct_tuplehash_to_ctrack() argument 124 return container_of(hash, struct nf_conn, nf_ct_tuplehash_to_ctrack() 125 tuplehash[hash->tuple.dst.dir]); nf_ct_tuplehash_to_ctrack() 159 /* Return conntrack_info and tuple hash for given skb. */ 184 void nf_ct_free_hashtable(void *hash, unsigned int size); 264 /* It's confirmed if it is, or has been in the hash table. */ nf_ct_is_confirmed()
|
/linux-4.1.27/fs/efivarfs/ |
H A D | super.c | 68 unsigned long hash = init_name_hash(); efivarfs_d_hash() local 76 hash = partial_name_hash(*s++, hash); efivarfs_d_hash() 80 hash = partial_name_hash(tolower(*s++), hash); efivarfs_d_hash() 82 qstr->hash = end_name_hash(hash); efivarfs_d_hash()
|
/linux-4.1.27/drivers/staging/lustre/lnet/selftest/ |
H A D | console.h | 65 struct list_head ndl_hlink; /* chain on hash */ 78 struct list_head grp_ndl_hash[0];/* hash table for nodes */ 100 struct list_head *bat_cli_hash; /* hash table of client nodes */ 102 struct list_head *bat_srv_hash; /* hash table of server nodes */ 127 #define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */ 128 #define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */ 162 struct list_head *ses_ndl_hash; /* hash table of nodes */ 178 lstcon_id2hash (lnet_process_id_t id, struct list_head *hash) lstcon_id2hash() argument 182 return &hash[idx]; lstcon_id2hash()
|
/linux-4.1.27/arch/powerpc/kvm/ |
H A D | book3s_32_mmu_host.c | 76 * a hash, so we don't waste cycles on looping */ kvmppc_sid_hash() 120 u32 page, hash; kvmppc_mmu_get_pteg() local 125 hash = ((vsid ^ page) << 6); kvmppc_mmu_get_pteg() 127 hash = ~hash; kvmppc_mmu_get_pteg() 129 hash &= htabmask; kvmppc_mmu_get_pteg() 131 pteg |= hash; kvmppc_mmu_get_pteg() 133 dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n", kvmppc_mmu_get_pteg() 134 htab, hash, htabmask, pteg); kvmppc_mmu_get_pteg()
|
H A D | book3s_64_mmu_host.c | 42 * a hash, so we don't waste cycles on looping */ kvmppc_sid_hash() 86 ulong hash, hpteg; kvmppc_mmu_map_page() local 156 hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M); kvmppc_mmu_map_page() 167 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); kvmppc_mmu_map_page() 181 hash = ~hash; kvmppc_mmu_map_page() 192 hash = ~hash; kvmppc_mmu_map_page() 193 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); kvmppc_mmu_map_page()
|
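Both book3s MMU files above pick a PTE group the same way: hash the (vsid, page) pair, mask it into the hash table, and fall back to the complemented "secondary" hash when the primary group is full. A simplified sketch along the lines of the 64-bit path; the vsid ^ page hash is the 32-bit variant, while the 64-bit code derives the hash from hpt_hash() on the virtual page number:

#include <stdint.h>

#define HPTES_PER_GROUP 8

static unsigned long pteg_index(unsigned long vsid, unsigned long page,
                                unsigned long htab_hash_mask, int secondary)
{
    unsigned long hash = vsid ^ page;

    if (secondary)
        hash = ~hash;            /* fallback group */
    return (hash & htab_hash_mask) * HPTES_PER_GROUP;
}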
/linux-4.1.27/include/drm/ttm/ |
H A D | ttm_object.h | 96 * @hash: hash entry for the per-device object hash. 105 * including the hash entry. A reference to a base object can 112 * already been taken out of the per-device hash. The parameter 127 struct drm_hash_item hash; member in struct:ttm_base_object 270 * @hash_order: Order of the hash table used to hold the reference objects. 296 * @hash_order: Order of hash table used to hash the base objects.
|
/linux-4.1.27/fs/ncpfs/ |
H A D | ncpsign_kernel.c | 112 unsigned char hash[16]; sign_verify_reply() local 122 nwsign(server->sign_last, data, hash); sign_verify_reply() 123 return memcmp(sign_buff, hash, 8); sign_verify_reply()
|
/linux-4.1.27/fs/ext2/ |
H A D | xattr.c | 829 __u32 hash = le32_to_cpu(HDR(bh)->h_hash); ext2_xattr_cache_insert() local 836 error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash); ext2_xattr_cache_insert() 845 ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash, ext2_xattr_cache_insert() 903 __u32 hash = le32_to_cpu(header->h_hash); ext2_xattr_cache_find() local 908 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash); ext2_xattr_cache_find() 911 hash); ext2_xattr_cache_find() 943 ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash); ext2_xattr_cache_find() 954 * Compute the hash of an extended attribute. 959 __u32 hash = 0; ext2_xattr_hash_entry() local 964 hash = (hash << NAME_HASH_SHIFT) ^ ext2_xattr_hash_entry() 965 (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ ext2_xattr_hash_entry() 974 hash = (hash << VALUE_HASH_SHIFT) ^ ext2_xattr_hash_entry() 975 (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^ ext2_xattr_hash_entry() 979 entry->e_hash = cpu_to_le32(hash); ext2_xattr_hash_entry() 990 * Re-compute the extended attribute hash value after an entry has changed. 996 __u32 hash = 0; ext2_xattr_rehash() local 1002 /* Block is not shared if an entry's hash value == 0 */ ext2_xattr_rehash() 1003 hash = 0; ext2_xattr_rehash() 1006 hash = (hash << BLOCK_HASH_SHIFT) ^ ext2_xattr_rehash() 1007 (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^ ext2_xattr_rehash() 1011 header->h_hash = cpu_to_le32(hash); ext2_xattr_rehash()
|
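ext2_xattr_hash_entry() above is a rotate-and-xor hash, first over the attribute name bytes and then over the value words; ext2_xattr_rehash() applies the same construction across all entries to form the shared-block hash. A standalone sketch of the entry hash; the shift constants match the usual ext2/ext4 values but are not shown in the snippet, so treat them as assumptions:

#include <stddef.h>
#include <stdint.h>

#define NAME_HASH_SHIFT  5    /* assumed */
#define VALUE_HASH_SHIFT 16   /* assumed */

static uint32_t xattr_entry_hash(const char *name, size_t name_len,
                                 const uint32_t *value, size_t nwords)
{
    uint32_t hash = 0;
    size_t i;

    for (i = 0; i < name_len; i++)       /* rotate left, xor in each byte */
        hash = (hash << NAME_HASH_SHIFT) ^
               (hash >> (32 - NAME_HASH_SHIFT)) ^ (unsigned char)name[i];

    for (i = 0; i < nwords; i++)         /* then each 32-bit value word */
        hash = (hash << VALUE_HASH_SHIFT) ^
               (hash >> (32 - VALUE_HASH_SHIFT)) ^ value[i];

    return hash;
}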
/linux-4.1.27/drivers/infiniband/core/ |
H A D | iwpm_util.c | 64 pr_err("%s Unable to create mapinfo hash table\n", __func__); iwpm_init() 72 pr_err("%s Unable to create reminfo hash table\n", __func__); iwpm_init() 206 /* free the hash list */ free_hash_bucket() 229 /* free the hash list */ free_reminfo_bucket() 520 u32 hash = jhash_2words(ipv6_hash, (__force u32) ipv6_sockaddr->sin6_port, 0); iwpm_ipv6_jhash() local 521 return hash; iwpm_ipv6_jhash() 527 u32 hash = jhash_2words(ipv4_hash, (__force u32) ipv4_sockaddr->sin_port, 0); iwpm_ipv4_jhash() local 528 return hash; iwpm_ipv4_jhash() 532 struct sockaddr_storage *b_sockaddr, u32 *hash) get_hash_bucket() 549 *hash = a_hash; get_hash_bucket() 551 *hash = jhash_2words(a_hash, b_hash, 0); get_hash_bucket() 559 u32 hash; get_mapinfo_hash_bucket() local 562 ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash); get_mapinfo_hash_bucket() 565 return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK]; get_mapinfo_hash_bucket() 572 u32 hash; get_reminfo_hash_bucket() local 575 ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash); get_reminfo_hash_bucket() 578 return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK]; get_reminfo_hash_bucket() 531 get_hash_bucket(struct sockaddr_storage *a_sockaddr, struct sockaddr_storage *b_sockaddr, u32 *hash) get_hash_bucket() argument
|