1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * libcfs/libcfs/hash.c
37 *
38 * Implement a hash table class for general use in the Lustre system.
39 *
40 * Author: YuZhangyong <yzy@clusterfs.com>
41 *
42 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
43 * - Simplified API and improved documentation
44 * - Added per-hash feature flags:
45 * * CFS_HASH_DEBUG additional validation
46 * * CFS_HASH_REHASH dynamic rehashing
47 * - Added per-hash statistics
48 * - General performance enhancements
49 *
50 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
51 * - move all stuff to libcfs
52 * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
53 * - ignore hs_rwlock if without CFS_HASH_REHASH setting
54 * - buckets are allocated one by one (instead of as contiguous memory),
55 * to avoid unnecessary cacheline conflicts
56 *
57 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
58 * - a "bucket" is now a group of hlist_heads; the user can specify the
59 * bucket size via bkt_bits of cfs_hash_create(). All hlist_heads in a
60 * bucket share one lock to reduce memory overhead.
61 *
62 * - support lockless hash, where the caller takes care of locks:
63 * this avoids lock overhead for hash tables that are already protected
64 * by locking in the caller for another reason
65 *
66 * - support both spin_lock/rwlock for bucket:
67 * overhead of spinlock contention is lower than read/write
68 * contention of a rwlock, so using a spinlock to serialize operations
69 * on a bucket is more reasonable for frequently changed hash tables
70 *
71 * - support single-lock mode:
72 * one lock protects all hash operations, to avoid the overhead of
73 * multiple locks when the hash table is always small
74 *
75 * - removed a lot of unnecessary addref & decref on hash elements:
76 * in many use-cases addref & decref are atomic operations, which
77 * are expensive.
78 *
79 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
80 * some Lustre use-cases require these functions to be strictly
81 * non-blocking; in those cases we need to schedule the required
82 * rehash on a different thread.
83 *
84 * - safer rehash on large hash tables
85 * In the old implementation, the rehash function would exclusively lock
86 * the hash table and finish the rehash in one batch, which is dangerous
87 * on SMP systems because rehashing millions of elements can take a long
88 * time. The new rehash implementation can release the lock and relax the
89 * CPU in the middle of a rehash, so another thread can safely search or
90 * change the hash table even while it is rehashing.
91 *
92 * - support two different refcount modes
93 * . hash table has refcount on element
94 * . hash table doesn't change refcount on adding/removing element
95 *
96 * - support long name hash table (for param-tree)
97 *
98 * - fix a bug for cfs_hash_rehash_key:
99 * in the old implementation, cfs_hash_rehash_key could corrupt the
100 * hash table because @key was overwritten without any protection.
101 * Now the user must define hs_keycpy for rehash-enabled hash
102 * tables; cfs_hash_rehash_key overwrites the hash key while holding
103 * the lock, by calling hs_keycpy.
104 *
105 * - better hash iteration:
106 * Now we support both locked iteration & lockless iteration of the hash
107 * table. Also, the user can break the iteration by returning 1 from the callback.
108 */
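
/*
 * Illustrative sketch of the caller-side pattern the notes above assume
 * (the object type "my_obj" and its fields are hypothetical; the callback
 * prototype matches the cfs_hash_for_each_cb_t callbacks defined later in
 * this file).  The user embeds a struct hlist_node in the hashed object and
 * drives iteration with a callback that returns 1 to stop early:
 *
 *	struct my_obj {
 *		__u64			mo_key;		// the hash key
 *		struct hlist_node	mo_hnode;	// linkage into the cfs_hash
 *		atomic_t		mo_refcount;	// managed via hs_get/hs_put
 *	};
 *
 *	static int
 *	my_obj_print_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			struct hlist_node *hnode, void *data)
 *	{
 *		struct my_obj *obj = container_of(hnode, struct my_obj, mo_hnode);
 *
 *		CDEBUG(D_INFO, "key %llu\n", (unsigned long long)obj->mo_key);
 *		return 0;	// return 1 instead to break the iteration early
 *	}
 */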
109
110 #include "../../include/linux/libcfs/libcfs.h"
111 #include <linux/seq_file.h>
112
113 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
114 static unsigned int warn_on_depth = 8;
115 module_param(warn_on_depth, uint, 0644);
116 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
117 #endif
118
119 struct cfs_wi_sched *cfs_sched_rehash;
120
121 static inline void
122 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
123
124 static inline void
125 cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
126
127 static inline void
128 cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
129 __acquires(&lock->spin)
130 {
131 spin_lock(&lock->spin);
132 }
133
134 static inline void
135 cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
136 __releases(&lock->spin)
137 {
138 spin_unlock(&lock->spin);
139 }
140
141 static inline void
142 cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
143 __acquires(&lock->rw)
144 {
145 if (!exclusive)
146 read_lock(&lock->rw);
147 else
148 write_lock(&lock->rw);
149 }
150
151 static inline void
152 cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
153 __releases(&lock->rw)
154 {
155 if (!exclusive)
156 read_unlock(&lock->rw);
157 else
158 write_unlock(&lock->rw);
159 }
160
161 /** No lock hash */
162 static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
163 .hs_lock = cfs_hash_nl_lock,
164 .hs_unlock = cfs_hash_nl_unlock,
165 .hs_bkt_lock = cfs_hash_nl_lock,
166 .hs_bkt_unlock = cfs_hash_nl_unlock,
167 };
168
169 /** no bucket lock, one spinlock to protect everything */
170 static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
171 .hs_lock = cfs_hash_spin_lock,
172 .hs_unlock = cfs_hash_spin_unlock,
173 .hs_bkt_lock = cfs_hash_nl_lock,
174 .hs_bkt_unlock = cfs_hash_nl_unlock,
175 };
176
177 /** spin bucket lock, rehash is enabled */
178 static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
179 .hs_lock = cfs_hash_rw_lock,
180 .hs_unlock = cfs_hash_rw_unlock,
181 .hs_bkt_lock = cfs_hash_spin_lock,
182 .hs_bkt_unlock = cfs_hash_spin_unlock,
183 };
184
185 /** rw bucket lock, rehash is enabled */
186 static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
187 .hs_lock = cfs_hash_rw_lock,
188 .hs_unlock = cfs_hash_rw_unlock,
189 .hs_bkt_lock = cfs_hash_rw_lock,
190 .hs_bkt_unlock = cfs_hash_rw_unlock,
191 };
192
193 /** spin bucket lock, rehash is disabled */
194 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
195 .hs_lock = cfs_hash_nl_lock,
196 .hs_unlock = cfs_hash_nl_unlock,
197 .hs_bkt_lock = cfs_hash_spin_lock,
198 .hs_bkt_unlock = cfs_hash_spin_unlock,
199 };
200
201 /** rw bucket lock, rehash is disabled */
202 static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
203 .hs_lock = cfs_hash_nl_lock,
204 .hs_unlock = cfs_hash_nl_unlock,
205 .hs_bkt_lock = cfs_hash_rw_lock,
206 .hs_bkt_unlock = cfs_hash_rw_unlock,
207 };
208
209 static void
210 cfs_hash_lock_setup(struct cfs_hash *hs)
211 {
212 if (cfs_hash_with_no_lock(hs)) {
213 hs->hs_lops = &cfs_hash_nl_lops;
214
215 } else if (cfs_hash_with_no_bktlock(hs)) {
216 hs->hs_lops = &cfs_hash_nbl_lops;
217 spin_lock_init(&hs->hs_lock.spin);
218
219 } else if (cfs_hash_with_rehash(hs)) {
220 rwlock_init(&hs->hs_lock.rw);
221
222 if (cfs_hash_with_rw_bktlock(hs))
223 hs->hs_lops = &cfs_hash_bkt_rw_lops;
224 else if (cfs_hash_with_spin_bktlock(hs))
225 hs->hs_lops = &cfs_hash_bkt_spin_lops;
226 else
227 LBUG();
228 } else {
229 if (cfs_hash_with_rw_bktlock(hs))
230 hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
231 else if (cfs_hash_with_spin_bktlock(hs))
232 hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
233 else
234 LBUG();
235 }
236 }
237
238 /**
239 * Simple hash head without depth tracking
240 * new element is always added to head of hlist
241 */
242 struct cfs_hash_head {
243 struct hlist_head hh_head; /**< entries list */
244 };
245
246 static int
247 cfs_hash_hh_hhead_size(struct cfs_hash *hs)
248 {
249 return sizeof(struct cfs_hash_head);
250 }
251
252 static struct hlist_head *
253 cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
254 {
255 struct cfs_hash_head *head;
256
257 head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
258 return &head[bd->bd_offset].hh_head;
259 }
260
261 static int
262 cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
263 struct hlist_node *hnode)
264 {
265 hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
266 return -1; /* unknown depth */
267 }
268
269 static int
270 cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
271 struct hlist_node *hnode)
272 {
273 hlist_del_init(hnode);
274 return -1; /* unknown depth */
275 }
276
277 /**
278 * Simple hash head with depth tracking
279 * new element is always added to head of hlist
280 */
281 struct cfs_hash_head_dep {
282 struct hlist_head hd_head; /**< entries list */
283 unsigned int hd_depth; /**< list length */
284 };
285
286 static int
287 cfs_hash_hd_hhead_size(struct cfs_hash *hs)
288 {
289 return sizeof(struct cfs_hash_head_dep);
290 }
291
292 static struct hlist_head *
293 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
294 {
295 struct cfs_hash_head_dep *head;
296
297 head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
298 return &head[bd->bd_offset].hd_head;
299 }
300
301 static int
302 cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
303 struct hlist_node *hnode)
304 {
305 struct cfs_hash_head_dep *hh;
306
307 hh = container_of(cfs_hash_hd_hhead(hs, bd),
308 struct cfs_hash_head_dep, hd_head);
309 hlist_add_head(hnode, &hh->hd_head);
310 return ++hh->hd_depth;
311 }
312
313 static int
314 cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
315 struct hlist_node *hnode)
316 {
317 struct cfs_hash_head_dep *hh;
318
319 hh = container_of(cfs_hash_hd_hhead(hs, bd),
320 struct cfs_hash_head_dep, hd_head);
321 hlist_del_init(hnode);
322 return --hh->hd_depth;
323 }
324
325 /**
326 * doubly linked hash head without depth tracking
327 * new element is always added to tail of hlist
328 */
329 struct cfs_hash_dhead {
330 struct hlist_head dh_head; /**< entries list */
331 struct hlist_node *dh_tail; /**< the last entry */
332 };
333
334 static int
335 cfs_hash_dh_hhead_size(struct cfs_hash *hs)
336 {
337 return sizeof(struct cfs_hash_dhead);
338 }
339
340 static struct hlist_head *
341 cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
342 {
343 struct cfs_hash_dhead *head;
344
345 head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
346 return &head[bd->bd_offset].dh_head;
347 }
348
349 static int
350 cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
351 struct hlist_node *hnode)
352 {
353 struct cfs_hash_dhead *dh;
354
355 dh = container_of(cfs_hash_dh_hhead(hs, bd),
356 struct cfs_hash_dhead, dh_head);
357 if (dh->dh_tail != NULL) /* not empty */
358 hlist_add_behind(hnode, dh->dh_tail);
359 else /* empty list */
360 hlist_add_head(hnode, &dh->dh_head);
361 dh->dh_tail = hnode;
362 return -1; /* unknown depth */
363 }
364
365 static int
366 cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
367 struct hlist_node *hnd)
368 {
369 struct cfs_hash_dhead *dh;
370
371 dh = container_of(cfs_hash_dh_hhead(hs, bd),
372 struct cfs_hash_dhead, dh_head);
373 if (hnd->next == NULL) { /* it's the tail */
374 dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
375 container_of(hnd->pprev, struct hlist_node, next);
376 }
377 hlist_del_init(hnd);
378 return -1; /* unknown depth */
379 }
380
381 /**
382 * doubly linked hash head with depth tracking
383 * new element is always added to tail of hlist
384 */
385 struct cfs_hash_dhead_dep {
386 struct hlist_head dd_head; /**< entries list */
387 struct hlist_node *dd_tail; /**< the last entry */
388 unsigned int dd_depth; /**< list length */
389 };
390
391 static int
392 cfs_hash_dd_hhead_size(struct cfs_hash *hs)
393 {
394 return sizeof(struct cfs_hash_dhead_dep);
395 }
396
397 static struct hlist_head *
398 cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
399 {
400 struct cfs_hash_dhead_dep *head;
401
402 head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
403 return &head[bd->bd_offset].dd_head;
404 }
405
406 static int
407 cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
408 struct hlist_node *hnode)
409 {
410 struct cfs_hash_dhead_dep *dh;
411
412 dh = container_of(cfs_hash_dd_hhead(hs, bd),
413 struct cfs_hash_dhead_dep, dd_head);
414 if (dh->dd_tail != NULL) /* not empty */
415 hlist_add_behind(hnode, dh->dd_tail);
416 else /* empty list */
417 hlist_add_head(hnode, &dh->dd_head);
418 dh->dd_tail = hnode;
419 return ++dh->dd_depth;
420 }
421
422 static int
423 cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
424 struct hlist_node *hnd)
425 {
426 struct cfs_hash_dhead_dep *dh;
427
428 dh = container_of(cfs_hash_dd_hhead(hs, bd),
429 struct cfs_hash_dhead_dep, dd_head);
430 if (hnd->next == NULL) { /* it's the tail */
431 dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
432 container_of(hnd->pprev, struct hlist_node, next);
433 }
434 hlist_del_init(hnd);
435 return --dh->dd_depth;
436 }
437
438 static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
439 .hop_hhead = cfs_hash_hh_hhead,
440 .hop_hhead_size = cfs_hash_hh_hhead_size,
441 .hop_hnode_add = cfs_hash_hh_hnode_add,
442 .hop_hnode_del = cfs_hash_hh_hnode_del,
443 };
444
445 static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
446 .hop_hhead = cfs_hash_hd_hhead,
447 .hop_hhead_size = cfs_hash_hd_hhead_size,
448 .hop_hnode_add = cfs_hash_hd_hnode_add,
449 .hop_hnode_del = cfs_hash_hd_hnode_del,
450 };
451
452 static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
453 .hop_hhead = cfs_hash_dh_hhead,
454 .hop_hhead_size = cfs_hash_dh_hhead_size,
455 .hop_hnode_add = cfs_hash_dh_hnode_add,
456 .hop_hnode_del = cfs_hash_dh_hnode_del,
457 };
458
459 static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
460 .hop_hhead = cfs_hash_dd_hhead,
461 .hop_hhead_size = cfs_hash_dd_hhead_size,
462 .hop_hnode_add = cfs_hash_dd_hnode_add,
463 .hop_hnode_del = cfs_hash_dd_hnode_del,
464 };
465
466 static void
467 cfs_hash_hlist_setup(struct cfs_hash *hs)
468 {
469 if (cfs_hash_with_add_tail(hs)) {
470 hs->hs_hops = cfs_hash_with_depth(hs) ?
471 &cfs_hash_dd_hops : &cfs_hash_dh_hops;
472 } else {
473 hs->hs_hops = cfs_hash_with_depth(hs) ?
474 &cfs_hash_hd_hops : &cfs_hash_hh_hops;
475 }
476 }
477
478 static void
479 cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
480 unsigned int bits, const void *key, struct cfs_hash_bd *bd)
481 {
482 unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
483
484 LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
485
486 bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
487 bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
488 }
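
/*
 * Worked example of the mapping above (illustrative numbers only): with
 * bits = 10 and hs_bkt_bits = 3 there are 2^10 hlist heads spread over
 * 2^(10 - 3) = 128 buckets of 8 heads each.  For a key hashing to
 * index = 677 (0x2a5):
 *
 *	bd_bucket = bkts[677 & 127]  -> bucket 37
 *	bd_offset = 677 >> 7         -> hlist 5 inside that bucket
 */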
489
490 void
491 cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
492 {
493 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
494 if (likely(hs->hs_rehash_buckets == NULL)) {
495 cfs_hash_bd_from_key(hs, hs->hs_buckets,
496 hs->hs_cur_bits, key, bd);
497 } else {
498 LASSERT(hs->hs_rehash_bits != 0);
499 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
500 hs->hs_rehash_bits, key, bd);
501 }
502 }
503 EXPORT_SYMBOL(cfs_hash_bd_get);
504
505 static inline void
506 cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
507 {
508 if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
509 return;
510
511 bd->bd_bucket->hsb_depmax = dep_cur;
512 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
513 if (likely(warn_on_depth == 0 ||
514 max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
515 return;
516
517 spin_lock(&hs->hs_dep_lock);
518 hs->hs_dep_max = dep_cur;
519 hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
520 hs->hs_dep_off = bd->bd_offset;
521 hs->hs_dep_bits = hs->hs_cur_bits;
522 spin_unlock(&hs->hs_dep_lock);
523
524 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
525 # endif
526 }
527
528 void
529 cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
530 struct hlist_node *hnode)
531 {
532 int rc;
533
534 rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
535 cfs_hash_bd_dep_record(hs, bd, rc);
536 bd->bd_bucket->hsb_version++;
537 if (unlikely(bd->bd_bucket->hsb_version == 0))
538 bd->bd_bucket->hsb_version++;
539 bd->bd_bucket->hsb_count++;
540
541 if (cfs_hash_with_counter(hs))
542 atomic_inc(&hs->hs_count);
543 if (!cfs_hash_with_no_itemref(hs))
544 cfs_hash_get(hs, hnode);
545 }
546 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
547
548 void
549 cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
550 struct hlist_node *hnode)
551 {
552 hs->hs_hops->hop_hnode_del(hs, bd, hnode);
553
554 LASSERT(bd->bd_bucket->hsb_count > 0);
555 bd->bd_bucket->hsb_count--;
556 bd->bd_bucket->hsb_version++;
557 if (unlikely(bd->bd_bucket->hsb_version == 0))
558 bd->bd_bucket->hsb_version++;
559
560 if (cfs_hash_with_counter(hs)) {
561 LASSERT(atomic_read(&hs->hs_count) > 0);
562 atomic_dec(&hs->hs_count);
563 }
564 if (!cfs_hash_with_no_itemref(hs))
565 cfs_hash_put_locked(hs, hnode);
566 }
567 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
568
569 void
570 cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
571 struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
572 {
573 struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
574 struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
575 int rc;
576
577 if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
578 return;
579
580 /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
581 * in cfs_hash_bd_del/add_locked */
582 hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
583 rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
584 cfs_hash_bd_dep_record(hs, bd_new, rc);
585
586 LASSERT(obkt->hsb_count > 0);
587 obkt->hsb_count--;
588 obkt->hsb_version++;
589 if (unlikely(obkt->hsb_version == 0))
590 obkt->hsb_version++;
591 nbkt->hsb_count++;
592 nbkt->hsb_version++;
593 if (unlikely(nbkt->hsb_version == 0))
594 nbkt->hsb_version++;
595 }
596 EXPORT_SYMBOL(cfs_hash_bd_move_locked);
597
598 enum {
599 /** always set, for sanity (avoid ZERO intent) */
600 CFS_HS_LOOKUP_MASK_FIND = BIT(0),
601 /** return entry with a ref */
602 CFS_HS_LOOKUP_MASK_REF = BIT(1),
603 /** add entry if not existing */
604 CFS_HS_LOOKUP_MASK_ADD = BIT(2),
605 /** delete entry, ignore other masks */
606 CFS_HS_LOOKUP_MASK_DEL = BIT(3),
607 };
608
609 enum cfs_hash_lookup_intent {
610 /** return item w/o refcount */
611 CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND,
612 /** return item with refcount */
613 CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND |
614 CFS_HS_LOOKUP_MASK_REF),
615 /** return item w/o refcount if it exists, otherwise add */
616 CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND |
617 CFS_HS_LOOKUP_MASK_ADD),
618 /** return item with refcount if it exists, otherwise add */
619 CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
620 CFS_HS_LOOKUP_MASK_ADD),
621 /** delete if it exists */
622 CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
623 CFS_HS_LOOKUP_MASK_DEL)
624 };
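
/*
 * The intents above are plain mask compositions; for instance (expanding
 * the definitions above):
 *
 *	CFS_HS_LOOKUP_IT_FINDADD == FIND | REF | ADD == BIT(0) | BIT(1) | BIT(2)
 *	CFS_HS_LOOKUP_IT_FINDDEL == FIND | DEL       == BIT(0) | BIT(3)
 *
 * cfs_hash_bd_findadd_locked() below builds its intent the same way:
 * (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_ADD.
 */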
625
626 static struct hlist_node *
627 cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
628 const void *key, struct hlist_node *hnode,
629 enum cfs_hash_lookup_intent intent)
630
631 {
632 struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
633 struct hlist_node *ehnode;
634 struct hlist_node *match;
635 int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
636
637 /* with this function, we can avoid a lot of useless refcount ops,
638 * which are expensive atomic operations most of the time. */
639 match = intent_add ? NULL : hnode;
640 hlist_for_each(ehnode, hhead) {
641 if (!cfs_hash_keycmp(hs, key, ehnode))
642 continue;
643
644 if (match != NULL && match != ehnode) /* can't match */
645 continue;
646
647 /* match and ... */
648 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
649 cfs_hash_bd_del_locked(hs, bd, ehnode);
650 return ehnode;
651 }
652
653 /* caller wants refcount? */
654 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
655 cfs_hash_get(hs, ehnode);
656 return ehnode;
657 }
658 /* no match item */
659 if (!intent_add)
660 return NULL;
661
662 LASSERT(hnode != NULL);
663 cfs_hash_bd_add_locked(hs, bd, hnode);
664 return hnode;
665 }
666
667 struct hlist_node *
668 cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
669 {
670 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
671 CFS_HS_LOOKUP_IT_FIND);
672 }
673 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
674
675 struct hlist_node *
676 cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
677 {
678 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
679 CFS_HS_LOOKUP_IT_PEEK);
680 }
681 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
682
683 struct hlist_node *
684 cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
685 const void *key, struct hlist_node *hnode,
686 int noref)
687 {
688 return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
689 (!noref * CFS_HS_LOOKUP_MASK_REF) |
690 CFS_HS_LOOKUP_IT_ADD);
691 }
692 EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
693
694 struct hlist_node *
695 cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
696 const void *key, struct hlist_node *hnode)
697 {
698 /* hnode can be NULL, we find the first item with @key */
699 return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
700 CFS_HS_LOOKUP_IT_FINDDEL);
701 }
702 EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
703
704 static void
705 cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
706 unsigned n, int excl)
707 {
708 struct cfs_hash_bucket *prev = NULL;
709 int i;
710
711 /**
712 * bds must be ordered by ascending bd->bd_bucket->hsb_index.
713 * NB: it's possible that several bds point to the same bucket but
714 * have different bd::bd_offset, so care must be taken to avoid deadlock.
715 */
716 cfs_hash_for_each_bd(bds, n, i) {
717 if (prev == bds[i].bd_bucket)
718 continue;
719
720 LASSERT(prev == NULL ||
721 prev->hsb_index < bds[i].bd_bucket->hsb_index);
722 cfs_hash_bd_lock(hs, &bds[i], excl);
723 prev = bds[i].bd_bucket;
724 }
725 }
726
727 static void
728 cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
729 unsigned n, int excl)
730 {
731 struct cfs_hash_bucket *prev = NULL;
732 int i;
733
734 cfs_hash_for_each_bd(bds, n, i) {
735 if (prev != bds[i].bd_bucket) {
736 cfs_hash_bd_unlock(hs, &bds[i], excl);
737 prev = bds[i].bd_bucket;
738 }
739 }
740 }
741
742 static struct hlist_node *
743 cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
744 unsigned n, const void *key)
745 {
746 struct hlist_node *ehnode;
747 unsigned i;
748
749 cfs_hash_for_each_bd(bds, n, i) {
750 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
751 CFS_HS_LOOKUP_IT_FIND);
752 if (ehnode != NULL)
753 return ehnode;
754 }
755 return NULL;
756 }
757
758 static struct hlist_node *
759 cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
760 struct cfs_hash_bd *bds, unsigned n, const void *key,
761 struct hlist_node *hnode, int noref)
762 {
763 struct hlist_node *ehnode;
764 int intent;
765 unsigned i;
766
767 LASSERT(hnode != NULL);
768 intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
769
770 cfs_hash_for_each_bd(bds, n, i) {
771 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
772 NULL, intent);
773 if (ehnode != NULL)
774 return ehnode;
775 }
776
777 if (i == 1) { /* only one bucket */
778 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
779 } else {
780 struct cfs_hash_bd mybd;
781
782 cfs_hash_bd_get(hs, key, &mybd);
783 cfs_hash_bd_add_locked(hs, &mybd, hnode);
784 }
785
786 return hnode;
787 }
788
789 static struct hlist_node *
790 cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
791 unsigned n, const void *key,
792 struct hlist_node *hnode)
793 {
794 struct hlist_node *ehnode;
795 unsigned i;
796
797 cfs_hash_for_each_bd(bds, n, i) {
798 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
799 CFS_HS_LOOKUP_IT_FINDDEL);
800 if (ehnode != NULL)
801 return ehnode;
802 }
803 return NULL;
804 }
805
806 static void
807 cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
808 {
809 int rc;
810
811 if (bd2->bd_bucket == NULL)
812 return;
813
814 if (bd1->bd_bucket == NULL) {
815 *bd1 = *bd2;
816 bd2->bd_bucket = NULL;
817 return;
818 }
819
820 rc = cfs_hash_bd_compare(bd1, bd2);
821 if (rc == 0) {
822 bd2->bd_bucket = NULL;
823
824 } else if (rc > 0) { /* swap bd1 and bd2 */
825 struct cfs_hash_bd tmp;
826
827 tmp = *bd2;
828 *bd2 = *bd1;
829 *bd1 = tmp;
830 }
831 }
832
833 void
834 cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds)
835 {
836 /* NB: caller should hold hs_lock.rw if REHASH is set */
837 cfs_hash_bd_from_key(hs, hs->hs_buckets,
838 hs->hs_cur_bits, key, &bds[0]);
839 if (likely(hs->hs_rehash_buckets == NULL)) {
840 /* no rehash or not rehashing */
841 bds[1].bd_bucket = NULL;
842 return;
843 }
844
845 LASSERT(hs->hs_rehash_bits != 0);
846 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
847 hs->hs_rehash_bits, key, &bds[1]);
848
849 cfs_hash_bd_order(&bds[0], &bds[1]);
850 }
851 EXPORT_SYMBOL(cfs_hash_dual_bd_get);
852
853 void
854 cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
855 {
856 cfs_hash_multi_bd_lock(hs, bds, 2, excl);
857 }
858 EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
859
860 void
861 cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
862 {
863 cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
864 }
865 EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
866
867 struct hlist_node *
868 cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
869 const void *key)
870 {
871 return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
872 }
873 EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
874
875 struct hlist_node *
876 cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
877 const void *key, struct hlist_node *hnode,
878 int noref)
879 {
880 return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
881 hnode, noref);
882 }
883 EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
884
885 struct hlist_node *
886 cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
887 const void *key, struct hlist_node *hnode)
888 {
889 return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
890 }
891 EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
892
893 static void
894 cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
895 int bkt_size, int prev_size, int size)
896 {
897 int i;
898
899 for (i = prev_size; i < size; i++) {
900 if (buckets[i] != NULL)
901 LIBCFS_FREE(buckets[i], bkt_size);
902 }
903
904 LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
905 }
906
907 /*
908 * Create or grow bucket memory. Return old_buckets if no allocation was
909 * needed, the newly allocated buckets if allocation was needed and
910 * successful, and NULL on error.
911 */
912 static struct cfs_hash_bucket **
913 cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
914 unsigned int old_size, unsigned int new_size)
915 {
916 struct cfs_hash_bucket **new_bkts;
917 int i;
918
919 LASSERT(old_size == 0 || old_bkts != NULL);
920
921 if (old_bkts != NULL && old_size == new_size)
922 return old_bkts;
923
924 LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
925 if (new_bkts == NULL)
926 return NULL;
927
928 if (old_bkts != NULL) {
929 memcpy(new_bkts, old_bkts,
930 min(old_size, new_size) * sizeof(*old_bkts));
931 }
932
933 for (i = old_size; i < new_size; i++) {
934 struct hlist_head *hhead;
935 struct cfs_hash_bd bd;
936
937 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
938 if (new_bkts[i] == NULL) {
939 cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
940 old_size, new_size);
941 return NULL;
942 }
943
944 new_bkts[i]->hsb_index = i;
945 new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
946 new_bkts[i]->hsb_depmax = -1; /* unknown */
947 bd.bd_bucket = new_bkts[i];
948 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
949 INIT_HLIST_HEAD(hhead);
950
951 if (cfs_hash_with_no_lock(hs) ||
952 cfs_hash_with_no_bktlock(hs))
953 continue;
954
955 if (cfs_hash_with_rw_bktlock(hs))
956 rwlock_init(&new_bkts[i]->hsb_lock.rw);
957 else if (cfs_hash_with_spin_bktlock(hs))
958 spin_lock_init(&new_bkts[i]->hsb_lock.spin);
959 else
960 LBUG(); /* invalid use-case */
961 }
962 return new_bkts;
963 }
964
965 /**
966 * Initialize new libcfs hash, where:
967 * @name - Descriptive hash name
968 * @cur_bits - Initial hash table size, in bits
969 * @max_bits - Maximum allowed hash table resize, in bits
970 * @ops - Registered hash table operations
971 * @flags - CFS_HASH_REHASH enables dynamic hash resizing
972 * - CFS_HASH_SORT enables chained hash sort
973 */
974 static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
975
976 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
977 static int cfs_hash_dep_print(cfs_workitem_t *wi)
978 {
979 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
980 int dep;
981 int bkt;
982 int off;
983 int bits;
984
985 spin_lock(&hs->hs_dep_lock);
986 dep = hs->hs_dep_max;
987 bkt = hs->hs_dep_bkt;
988 off = hs->hs_dep_off;
989 bits = hs->hs_dep_bits;
990 spin_unlock(&hs->hs_dep_lock);
991
992 LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
993 hs->hs_name, bits, dep, bkt, off);
994 spin_lock(&hs->hs_dep_lock);
995 hs->hs_dep_bits = 0; /* mark as workitem done */
996 spin_unlock(&hs->hs_dep_lock);
997 return 0;
998 }
999
1000 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
1001 {
1002 spin_lock_init(&hs->hs_dep_lock);
1003 cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
1004 }
1005
1006 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
1007 {
1008 if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
1009 return;
1010
1011 spin_lock(&hs->hs_dep_lock);
1012 while (hs->hs_dep_bits != 0) {
1013 spin_unlock(&hs->hs_dep_lock);
1014 cond_resched();
1015 spin_lock(&hs->hs_dep_lock);
1016 }
1017 spin_unlock(&hs->hs_dep_lock);
1018 }
1019
1020 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
1021
1022 static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
1023 static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
1024
1025 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1026
1027 struct cfs_hash *
1028 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1029 unsigned bkt_bits, unsigned extra_bytes,
1030 unsigned min_theta, unsigned max_theta,
1031 struct cfs_hash_ops *ops, unsigned flags)
1032 {
1033 struct cfs_hash *hs;
1034 int len;
1035
1036 CLASSERT(CFS_HASH_THETA_BITS < 15);
1037
1038 LASSERT(name != NULL);
1039 LASSERT(ops != NULL);
1040 LASSERT(ops->hs_key);
1041 LASSERT(ops->hs_hash);
1042 LASSERT(ops->hs_object);
1043 LASSERT(ops->hs_keycmp);
1044 LASSERT(ops->hs_get != NULL);
1045 LASSERT(ops->hs_put_locked != NULL);
1046
1047 if ((flags & CFS_HASH_REHASH) != 0)
1048 flags |= CFS_HASH_COUNTER; /* must have counter */
1049
1050 LASSERT(cur_bits > 0);
1051 LASSERT(cur_bits >= bkt_bits);
1052 LASSERT(max_bits >= cur_bits && max_bits < 31);
1053 LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1054 LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1055 (flags & CFS_HASH_NO_LOCK) == 0));
1056 LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1057 ops->hs_keycpy != NULL));
1058
1059 len = (flags & CFS_HASH_BIGNAME) == 0 ?
1060 CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1061 LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
1062 if (hs == NULL)
1063 return NULL;
1064
1065 strncpy(hs->hs_name, name, len);
1066 hs->hs_name[len - 1] = '\0';
1067 hs->hs_flags = flags;
1068
1069 atomic_set(&hs->hs_refcount, 1);
1070 atomic_set(&hs->hs_count, 0);
1071
1072 cfs_hash_lock_setup(hs);
1073 cfs_hash_hlist_setup(hs);
1074
1075 hs->hs_cur_bits = (__u8)cur_bits;
1076 hs->hs_min_bits = (__u8)cur_bits;
1077 hs->hs_max_bits = (__u8)max_bits;
1078 hs->hs_bkt_bits = (__u8)bkt_bits;
1079
1080 hs->hs_ops = ops;
1081 hs->hs_extra_bytes = extra_bytes;
1082 hs->hs_rehash_bits = 0;
1083 cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1084 cfs_hash_depth_wi_init(hs);
1085
1086 if (cfs_hash_with_rehash(hs))
1087 __cfs_hash_set_theta(hs, min_theta, max_theta);
1088
1089 hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1090 CFS_HASH_NBKT(hs));
1091 if (hs->hs_buckets != NULL)
1092 return hs;
1093
1094 LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
1095 return NULL;
1096 }
1097 EXPORT_SYMBOL(cfs_hash_create);
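
/*
 * Illustrative use of cfs_hash_create() (a sketch, not a real caller: the
 * table name, ops callbacks and sizes are hypothetical, and the flag and
 * theta constants are assumed to come from libcfs_hash.h).  cfs_hash_create()
 * asserts that at least hs_key, hs_hash, hs_object, hs_keycmp, hs_get and
 * hs_put_locked are provided by the ops table:
 *
 *	static struct cfs_hash_ops my_hash_ops = {
 *		.hs_hash	= my_obj_hash,		// hash @key into [0, mask]
 *		.hs_key		= my_obj_key,		// key address of an hnode
 *		.hs_keycmp	= my_obj_keycmp,	// compare @key with an hnode
 *		.hs_object	= my_obj_object,	// object address of an hnode
 *		.hs_get		= my_obj_get,		// take a reference
 *		.hs_put_locked	= my_obj_put_locked,	// drop a ref, bucket lock held
 *	};
 *
 *	hs = cfs_hash_create("my_table",
 *			     7, 12, 4,			// cur_bits, max_bits, bkt_bits
 *			     0,				// extra_bytes
 *			     CFS_HASH_MIN_THETA,	// assumed header constants
 *			     CFS_HASH_MAX_THETA,
 *			     &my_hash_ops,
 *			     CFS_HASH_REHASH | CFS_HASH_COUNTER |
 *			     CFS_HASH_SPIN_BKTLOCK);
 *	if (hs == NULL)
 *		return -ENOMEM;
 *	...
 *	cfs_hash_putref(hs);	// drop the initial ref; the table is destroyed
 *				// once the refcount reaches zero
 */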
1098
1099 /**
1100 * Cleanup libcfs hash @hs.
1101 */
1102 static void
1103 cfs_hash_destroy(struct cfs_hash *hs)
1104 {
1105 struct hlist_node *hnode;
1106 struct hlist_node *pos;
1107 struct cfs_hash_bd bd;
1108 int i;
1109
1110 LASSERT(hs != NULL);
1111 LASSERT(!cfs_hash_is_exiting(hs) &&
1112 !cfs_hash_is_iterating(hs));
1113
1114 /**
1115 * prohibit further rehashes; we don't need any lock because
1116 * we are the only (last) one who can change it.
1117 */
1118 hs->hs_exiting = 1;
1119 if (cfs_hash_with_rehash(hs))
1120 cfs_hash_rehash_cancel(hs);
1121
1122 cfs_hash_depth_wi_cancel(hs);
1123 /* rehash should be done/canceled */
1124 LASSERT(hs->hs_buckets != NULL &&
1125 hs->hs_rehash_buckets == NULL);
1126
1127 cfs_hash_for_each_bucket(hs, &bd, i) {
1128 struct hlist_head *hhead;
1129
1130 LASSERT(bd.bd_bucket != NULL);
1131 /* no need to take this lock; taken just for code consistency */
1132 cfs_hash_bd_lock(hs, &bd, 1);
1133
1134 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1135 hlist_for_each_safe(hnode, pos, hhead) {
1136 LASSERTF(!cfs_hash_with_assert_empty(hs),
1137 "hash %s bucket %u(%u) is not empty: %u items left\n",
1138 hs->hs_name, bd.bd_bucket->hsb_index,
1139 bd.bd_offset, bd.bd_bucket->hsb_count);
1140 /* can't assert key validity, because we
1141 * can interrupt rehash */
1142 cfs_hash_bd_del_locked(hs, &bd, hnode);
1143 cfs_hash_exit(hs, hnode);
1144 }
1145 }
1146 LASSERT(bd.bd_bucket->hsb_count == 0);
1147 cfs_hash_bd_unlock(hs, &bd, 1);
1148 cond_resched();
1149 }
1150
1151 LASSERT(atomic_read(&hs->hs_count) == 0);
1152
1153 cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1154 0, CFS_HASH_NBKT(hs));
1155 i = cfs_hash_with_bigname(hs) ?
1156 CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1157 LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
1158 }
1159
1160 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
1161 {
1162 if (atomic_inc_not_zero(&hs->hs_refcount))
1163 return hs;
1164 return NULL;
1165 }
1166 EXPORT_SYMBOL(cfs_hash_getref);
1167
1168 void cfs_hash_putref(struct cfs_hash *hs)
1169 {
1170 if (atomic_dec_and_test(&hs->hs_refcount))
1171 cfs_hash_destroy(hs);
1172 }
1173 EXPORT_SYMBOL(cfs_hash_putref);
1174
1175 static inline int
1176 cfs_hash_rehash_bits(struct cfs_hash *hs)
1177 {
1178 if (cfs_hash_with_no_lock(hs) ||
1179 !cfs_hash_with_rehash(hs))
1180 return -EOPNOTSUPP;
1181
1182 if (unlikely(cfs_hash_is_exiting(hs)))
1183 return -ESRCH;
1184
1185 if (unlikely(cfs_hash_is_rehashing(hs)))
1186 return -EALREADY;
1187
1188 if (unlikely(cfs_hash_is_iterating(hs)))
1189 return -EAGAIN;
1190
1191 /* XXX: need to handle case with max_theta != 2.0
1192 * and the case with min_theta != 0.5 */
1193 if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1194 (__cfs_hash_theta(hs) > hs->hs_max_theta))
1195 return hs->hs_cur_bits + 1;
1196
1197 if (!cfs_hash_with_shrink(hs))
1198 return 0;
1199
1200 if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1201 (__cfs_hash_theta(hs) < hs->hs_min_theta))
1202 return hs->hs_cur_bits - 1;
1203
1204 return 0;
1205 }
1206
1207 /**
1208 * don't allow inline rehash if:
1209 * - user wants non-blocking change (add/del) on hash table
1210 * - too many elements
1211 */
1212 static inline int
1213 cfs_hash_rehash_inline(struct cfs_hash *hs)
1214 {
1215 return !cfs_hash_with_nblk_change(hs) &&
1216 atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1217 }
1218
1219 /**
1220 * Add item @hnode to libcfs hash @hs using @key. The registered
1221 * ops->hs_get function will be called when the item is added.
1222 */
1223 void
1224 cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1225 {
1226 struct cfs_hash_bd bd;
1227 int bits;
1228
1229 LASSERT(hlist_unhashed(hnode));
1230
1231 cfs_hash_lock(hs, 0);
1232 cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1233
1234 cfs_hash_key_validate(hs, key, hnode);
1235 cfs_hash_bd_add_locked(hs, &bd, hnode);
1236
1237 cfs_hash_bd_unlock(hs, &bd, 1);
1238
1239 bits = cfs_hash_rehash_bits(hs);
1240 cfs_hash_unlock(hs, 0);
1241 if (bits > 0)
1242 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1243 }
1244 EXPORT_SYMBOL(cfs_hash_add);
1245
1246 static struct hlist_node *
1247 cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1248 struct hlist_node *hnode, int noref)
1249 {
1250 struct hlist_node *ehnode;
1251 struct cfs_hash_bd bds[2];
1252 int bits = 0;
1253
1254 LASSERT(hlist_unhashed(hnode));
1255
1256 cfs_hash_lock(hs, 0);
1257 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1258
1259 cfs_hash_key_validate(hs, key, hnode);
1260 ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1261 hnode, noref);
1262 cfs_hash_dual_bd_unlock(hs, bds, 1);
1263
1264 if (ehnode == hnode) /* new item added */
1265 bits = cfs_hash_rehash_bits(hs);
1266 cfs_hash_unlock(hs, 0);
1267 if (bits > 0)
1268 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1269
1270 return ehnode;
1271 }
1272
1273 /**
1274 * Add item @hnode to libcfs hash @hs using @key. The registered
1275 * ops->hs_get function will be called if the item was added.
1276 * Returns 0 on success or -EALREADY on key collisions.
1277 */
1278 int
1279 cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1280 {
1281 return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1282 -EALREADY : 0;
1283 }
1284 EXPORT_SYMBOL(cfs_hash_add_unique);
1285
1286 /**
1287 * Add item @hnode to libcfs hash @hs using @key. If this @key
1288 * already exists in the hash then ops->hs_get will be called on the
1289 * conflicting entry and that entry will be returned to the caller.
1290 * Otherwise ops->hs_get is called on the item which was added.
1291 */
1292 void *
1293 cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
1294 struct hlist_node *hnode)
1295 {
1296 hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1297
1298 return cfs_hash_object(hs, hnode);
1299 }
1300 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1301
1302 /**
1303 * Delete item @hnode from the libcfs hash @hs using @key. The @key
1304 * is required to ensure the correct hash bucket is locked since there
1305 * is no direct linkage from the item to the bucket. The object
1306 * removed from the hash will be returned and ops->hs_put is called
1307 * on the removed object.
1308 */
1309 void *
1310 cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1311 {
1312 void *obj = NULL;
1313 int bits = 0;
1314 struct cfs_hash_bd bds[2];
1315
1316 cfs_hash_lock(hs, 0);
1317 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1318
1319 /* NB: do nothing if @hnode is not in hash table */
1320 if (hnode == NULL || !hlist_unhashed(hnode)) {
1321 if (bds[1].bd_bucket == NULL && hnode != NULL) {
1322 cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1323 } else {
1324 hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1325 key, hnode);
1326 }
1327 }
1328
1329 if (hnode != NULL) {
1330 obj = cfs_hash_object(hs, hnode);
1331 bits = cfs_hash_rehash_bits(hs);
1332 }
1333
1334 cfs_hash_dual_bd_unlock(hs, bds, 1);
1335 cfs_hash_unlock(hs, 0);
1336 if (bits > 0)
1337 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1338
1339 return obj;
1340 }
1341 EXPORT_SYMBOL(cfs_hash_del);
1342
1343 /**
1344 * Delete item given @key in libcfs hash @hs. The first @key found in
1345 * the hash will be removed; if the key exists multiple times in the hash
1346 * @hs, this function must be called once per key. The removed object
1347 * will be returned and ops->hs_put is called on the removed object.
1348 */
1349 void *
1350 cfs_hash_del_key(struct cfs_hash *hs, const void *key)
1351 {
1352 return cfs_hash_del(hs, key, NULL);
1353 }
1354 EXPORT_SYMBOL(cfs_hash_del_key);
1355
1356 /**
1357 * Lookup an item using @key in the libcfs hash @hs and return it.
1358 * If the @key is found in the hash hs->hs_get() is called and the
1359 * matching object is returned. It is the caller's responsibility
1360 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1361 * when finished with the object. If the @key was not found
1362 * in the hash @hs NULL is returned.
1363 */
1364 void *
1365 cfs_hash_lookup(struct cfs_hash *hs, const void *key)
1366 {
1367 void *obj = NULL;
1368 struct hlist_node *hnode;
1369 struct cfs_hash_bd bds[2];
1370
1371 cfs_hash_lock(hs, 0);
1372 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1373
1374 hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1375 if (hnode != NULL)
1376 obj = cfs_hash_object(hs, hnode);
1377
1378 cfs_hash_dual_bd_unlock(hs, bds, 0);
1379 cfs_hash_unlock(hs, 0);
1380
1381 return obj;
1382 }
1383 EXPORT_SYMBOL(cfs_hash_lookup);
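
/*
 * Illustrative add/lookup/delete discipline (sketch only; "my_obj" and its
 * fields are hypothetical, and cfs_hash_put() is the release helper from
 * libcfs_hash.h already used elsewhere in this file):
 *
 *	cfs_hash_add(hs, &obj->mo_key, &obj->mo_hnode);	// table takes a ref via hs_get
 *
 *	found = cfs_hash_lookup(hs, &key);	// returns the object or NULL;
 *	if (found != NULL) {			// hs_get was called on a hit
 *		use_object(found);
 *		cfs_hash_put(hs, &found->mo_hnode);	// balance the lookup ref
 *	}
 *
 *	gone = cfs_hash_del_key(hs, &key);	// unhash the first match; the
 *						// table's own ref is dropped
 */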
1384
1385 static void
1386 cfs_hash_for_each_enter(struct cfs_hash *hs) {
1387 LASSERT(!cfs_hash_is_exiting(hs));
1388
1389 if (!cfs_hash_with_rehash(hs))
1390 return;
1391 /*
1392 * NB: there is a race on cfs_hash::hs_iterating, but it doesn't matter
1393 * because it's just an unreliable signal to rehash-thread,
1394 * rehash-thread will try to finish rehash ASAP when seeing this.
1395 */
1396 hs->hs_iterating = 1;
1397
1398 cfs_hash_lock(hs, 1);
1399 hs->hs_iterators++;
1400
1401 /* NB: iteration is mostly called by service threads;
1402 * we tend to cancel a pending rehash request instead of
1403 * blocking the service thread, and will relaunch the rehash
1404 * request after iteration */
1405 if (cfs_hash_is_rehashing(hs))
1406 cfs_hash_rehash_cancel_locked(hs);
1407 cfs_hash_unlock(hs, 1);
1408 }
1409
1410 static void
1411 cfs_hash_for_each_exit(struct cfs_hash *hs) {
1412 int remained;
1413 int bits;
1414
1415 if (!cfs_hash_with_rehash(hs))
1416 return;
1417 cfs_hash_lock(hs, 1);
1418 remained = --hs->hs_iterators;
1419 bits = cfs_hash_rehash_bits(hs);
1420 cfs_hash_unlock(hs, 1);
1421 /* NB: there is a race on cfs_hash::hs_iterating, see above */
1422 if (remained == 0)
1423 hs->hs_iterating = 0;
1424 if (bits > 0) {
1425 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1426 CFS_HASH_LOOP_HOG);
1427 }
1428 }
1429
1430 /**
1431 * For each item in the libcfs hash @hs call the passed callback @func
1432 * and pass to it as an argument each hash item and the private @data.
1433 *
1434 * a) the function may sleep!
1435 * b) during the callback:
1436 * . the bucket lock is held so the callback must never sleep.
1437 * . if @remove_safe is true, the user can remove the current item with
1438 * cfs_hash_bd_del_locked
1439 */
1440 static __u64
1441 cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1442 void *data, int remove_safe) {
1443 struct hlist_node *hnode;
1444 struct hlist_node *pos;
1445 struct cfs_hash_bd bd;
1446 __u64 count = 0;
1447 int excl = !!remove_safe;
1448 int loop = 0;
1449 int i;
1450
1451 cfs_hash_for_each_enter(hs);
1452
1453 cfs_hash_lock(hs, 0);
1454 LASSERT(!cfs_hash_is_rehashing(hs));
1455
1456 cfs_hash_for_each_bucket(hs, &bd, i) {
1457 struct hlist_head *hhead;
1458
1459 cfs_hash_bd_lock(hs, &bd, excl);
1460 if (func == NULL) { /* only glimpse size */
1461 count += bd.bd_bucket->hsb_count;
1462 cfs_hash_bd_unlock(hs, &bd, excl);
1463 continue;
1464 }
1465
1466 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1467 hlist_for_each_safe(hnode, pos, hhead) {
1468 cfs_hash_bucket_validate(hs, &bd, hnode);
1469 count++;
1470 loop++;
1471 if (func(hs, &bd, hnode, data)) {
1472 cfs_hash_bd_unlock(hs, &bd, excl);
1473 goto out;
1474 }
1475 }
1476 }
1477 cfs_hash_bd_unlock(hs, &bd, excl);
1478 if (loop < CFS_HASH_LOOP_HOG)
1479 continue;
1480 loop = 0;
1481 cfs_hash_unlock(hs, 0);
1482 cond_resched();
1483 cfs_hash_lock(hs, 0);
1484 }
1485 out:
1486 cfs_hash_unlock(hs, 0);
1487
1488 cfs_hash_for_each_exit(hs);
1489 return count;
1490 }
1491
1492 struct cfs_hash_cond_arg {
1493 cfs_hash_cond_opt_cb_t func;
1494 void *arg;
1495 };
1496
1497 static int
1498 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1499 struct hlist_node *hnode, void *data)
1500 {
1501 struct cfs_hash_cond_arg *cond = data;
1502
1503 if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1504 cfs_hash_bd_del_locked(hs, bd, hnode);
1505 return 0;
1506 }
1507
1508 /**
1509 * Delete items from the libcfs hash @hs for which @func returns true.
1510 * The write lock is held during the loop over each bucket to prevent
1511 * any object from being referenced.
1512 */
1513 void
1514 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1515 {
1516 struct cfs_hash_cond_arg arg = {
1517 .func = func,
1518 .arg = data,
1519 };
1520
1521 cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1522 }
1523 EXPORT_SYMBOL(cfs_hash_cond_del);
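
/*
 * Illustrative predicate for cfs_hash_cond_del() (sketch; "my_obj" and its
 * fields are hypothetical, and the cfs_hash_cond_opt_cb_t prototype is
 * assumed from the way cond->func is invoked above: it gets the object
 * pointer plus the private argument and returns non-zero to delete):
 *
 *	static int
 *	my_obj_is_stale(void *obj, void *arg)
 *	{
 *		struct my_obj *mo = obj;
 *		__u64 *deadline = arg;
 *
 *		return mo->mo_timestamp < *deadline;	// non-zero => delete it
 *	}
 *
 *	cfs_hash_cond_del(hs, my_obj_is_stale, &deadline);
 */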
1524
1525 void
1526 cfs_hash_for_each(struct cfs_hash *hs,
1527 cfs_hash_for_each_cb_t func, void *data)
1528 {
1529 cfs_hash_for_each_tight(hs, func, data, 0);
1530 }
1531 EXPORT_SYMBOL(cfs_hash_for_each);
1532
1533 void
1534 cfs_hash_for_each_safe(struct cfs_hash *hs,
1535 cfs_hash_for_each_cb_t func, void *data) {
1536 cfs_hash_for_each_tight(hs, func, data, 1);
1537 }
1538 EXPORT_SYMBOL(cfs_hash_for_each_safe);
1539
1540 static int
1541 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1542 struct hlist_node *hnode, void *data)
1543 {
1544 *(int *)data = 0;
1545 return 1; /* return 1 to break the loop */
1546 }
1547
1548 int
1549 cfs_hash_is_empty(struct cfs_hash *hs)
1550 {
1551 int empty = 1;
1552
1553 cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1554 return empty;
1555 }
1556 EXPORT_SYMBOL(cfs_hash_is_empty);
1557
1558 __u64
1559 cfs_hash_size_get(struct cfs_hash *hs)
1560 {
1561 return cfs_hash_with_counter(hs) ?
1562 atomic_read(&hs->hs_count) :
1563 cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1564 }
1565 EXPORT_SYMBOL(cfs_hash_size_get);
1566
1567 /*
1568 * cfs_hash_for_each_relax:
1569 * Iterate the hash table and call @func on each item without
1570 * any lock. This function can't guarantee to finish iteration
1571 * any lock. This function can't guarantee that it will finish the iteration
1572 *
1573 * a. if rehash_key is enabled, an item can be moved from
1574 * one bucket to another bucket
1575 * b. the user can remove a non-zero-ref item from the hash-table,
1576 * so the item can disappear from the hash-table; even worse,
1577 * it's possible that the user changed the key and inserted it into
1578 * another hash bucket.
1579 * There is no way for us to finish the iteration correctly in the
1580 * previous two cases, so the iteration has to be stopped on change.
1581 */
1582 static int
1583 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1584 void *data) {
1585 struct hlist_node *hnode;
1586 struct hlist_node *tmp;
1587 struct cfs_hash_bd bd;
1588 __u32 version;
1589 int count = 0;
1590 int stop_on_change;
1591 int rc;
1592 int i;
1593
1594 stop_on_change = cfs_hash_with_rehash_key(hs) ||
1595 !cfs_hash_with_no_itemref(hs) ||
1596 hs->hs_ops->hs_put_locked == NULL;
1597 cfs_hash_lock(hs, 0);
1598 LASSERT(!cfs_hash_is_rehashing(hs));
1599
1600 cfs_hash_for_each_bucket(hs, &bd, i) {
1601 struct hlist_head *hhead;
1602
1603 cfs_hash_bd_lock(hs, &bd, 0);
1604 version = cfs_hash_bd_version_get(&bd);
1605
1606 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1607 for (hnode = hhead->first; hnode != NULL;) {
1608 cfs_hash_bucket_validate(hs, &bd, hnode);
1609 cfs_hash_get(hs, hnode);
1610 cfs_hash_bd_unlock(hs, &bd, 0);
1611 cfs_hash_unlock(hs, 0);
1612
1613 rc = func(hs, &bd, hnode, data);
1614 if (stop_on_change)
1615 cfs_hash_put(hs, hnode);
1616 cond_resched();
1617 count++;
1618
1619 cfs_hash_lock(hs, 0);
1620 cfs_hash_bd_lock(hs, &bd, 0);
1621 if (!stop_on_change) {
1622 tmp = hnode->next;
1623 cfs_hash_put_locked(hs, hnode);
1624 hnode = tmp;
1625 } else { /* bucket changed? */
1626 if (version !=
1627 cfs_hash_bd_version_get(&bd))
1628 break;
1629 /* safe to continue because no change */
1630 hnode = hnode->next;
1631 }
1632 if (rc) /* callback wants to break iteration */
1633 break;
1634 }
1635 if (rc) /* callback wants to break iteration */
1636 break;
1637 }
1638 cfs_hash_bd_unlock(hs, &bd, 0);
1639 if (rc) /* callback wants to break iteration */
1640 break;
1641 }
1642 cfs_hash_unlock(hs, 0);
1643
1644 return count;
1645 }
1646
1647 int
1648 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1649 cfs_hash_for_each_cb_t func, void *data) {
1650 if (cfs_hash_with_no_lock(hs) ||
1651 cfs_hash_with_rehash_key(hs) ||
1652 !cfs_hash_with_no_itemref(hs))
1653 return -EOPNOTSUPP;
1654
1655 if (hs->hs_ops->hs_get == NULL ||
1656 (hs->hs_ops->hs_put == NULL &&
1657 hs->hs_ops->hs_put_locked == NULL))
1658 return -EOPNOTSUPP;
1659
1660 cfs_hash_for_each_enter(hs);
1661 cfs_hash_for_each_relax(hs, func, data);
1662 cfs_hash_for_each_exit(hs);
1663
1664 return 0;
1665 }
1666 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
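
/*
 * Illustrative lockless iteration (sketch; "my_obj" and the callback body
 * are hypothetical).  Unlike cfs_hash_for_each(), the callback here runs
 * with no bucket lock held (cfs_hash_for_each_relax() drops the locks around
 * the call), so it may sleep; the table must have been created with hs_get
 * and hs_put/hs_put_locked ops and without CFS_HASH_NO_LOCK:
 *
 *	static int
 *	my_obj_flush_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			struct hlist_node *hnode, void *data)
 *	{
 *		struct my_obj *obj = container_of(hnode, struct my_obj, mo_hnode);
 *
 *		my_obj_flush(obj);	// may block; that is allowed here
 *		return 0;
 *	}
 *
 *	rc = cfs_hash_for_each_nolock(hs, my_obj_flush_cb, NULL);
 *	if (rc != 0)	// -EOPNOTSUPP when the table's flags/ops rule this out
 *		...
 */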
1667
1668 /**
1669 * For each hash bucket in the libcfs hash @hs call the passed callback
1670 * @func until all the hash buckets are empty. The passed callback @func
1671 * or the previously registered callback hs->hs_put must remove the item
1672 * from the hash. You may either use the cfs_hash_del() or hlist_del()
1673 * functions. No rwlocks will be held during the callback @func, so it is
1674 * safe to sleep if needed. This function will not terminate until the
1675 * hash is empty. Note it is still possible to concurrently add new
1676 * items into the hash. It is the caller's responsibility to ensure
1677 * the required locking is in place to prevent concurrent insertions.
1678 */
1679 int
1680 cfs_hash_for_each_empty(struct cfs_hash *hs,
1681 cfs_hash_for_each_cb_t func, void *data) {
1682 unsigned i = 0;
1683
1684 if (cfs_hash_with_no_lock(hs))
1685 return -EOPNOTSUPP;
1686
1687 if (hs->hs_ops->hs_get == NULL ||
1688 (hs->hs_ops->hs_put == NULL &&
1689 hs->hs_ops->hs_put_locked == NULL))
1690 return -EOPNOTSUPP;
1691
1692 cfs_hash_for_each_enter(hs);
1693 while (cfs_hash_for_each_relax(hs, func, data)) {
1694 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1695 hs->hs_name, i++);
1696 }
1697 cfs_hash_for_each_exit(hs);
1698 return 0;
1699 }
1700 EXPORT_SYMBOL(cfs_hash_for_each_empty);
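/*
 * Illustrative usage sketch, kept under "#if 0": a hypothetical callback
 * for cfs_hash_for_each_empty().  As described above, the callback (or the
 * registered hs_put) must remove the item from the hash; hlist_del() is
 * used here, as the comment allows.  struct example_obj and
 * example_unlink_cb() are invented names, and it is assumed that the
 * hash's registered hs_put releases the object once the iterator drops
 * its reference.
 */
#if 0
struct example_obj {
	struct hlist_node	eo_hnode;	/* linkage into the hash */
	/* ... payload ... */
};

static int
example_unlink_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		  struct hlist_node *hnode, void *data)
{
	struct example_obj *obj = hlist_entry(hnode, struct example_obj,
					      eo_hnode);

	hlist_del(hnode);	/* required: take the item out of the hash */
	/* no locks are held, so sleeping while tearing down @obj is fine */
	return 0;
}

/* typical call site, e.g. draining a hash before destroying it:
 *	cfs_hash_for_each_empty(hs, example_unlink_cb, NULL);
 */
#endif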
1701
1702 void
1703 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1704 cfs_hash_for_each_cb_t func, void *data)
1705 {
1706 struct hlist_head *hhead;
1707 struct hlist_node *hnode;
1708 struct cfs_hash_bd bd;
1709
1710 cfs_hash_for_each_enter(hs);
1711 cfs_hash_lock(hs, 0);
1712 if (hindex >= CFS_HASH_NHLIST(hs))
1713 goto out;
1714
1715 cfs_hash_bd_index_set(hs, hindex, &bd);
1716
1717 cfs_hash_bd_lock(hs, &bd, 0);
1718 hhead = cfs_hash_bd_hhead(hs, &bd);
1719 hlist_for_each(hnode, hhead) {
1720 if (func(hs, &bd, hnode, data))
1721 break;
1722 }
1723 cfs_hash_bd_unlock(hs, &bd, 0);
1724 out:
1725 cfs_hash_unlock(hs, 0);
1726 cfs_hash_for_each_exit(hs);
1727 }
1728
1729 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1730
1731 /*
1732 * For each item in the libcfs hash @hs which matches @key, call the
1733 * passed callback @func, passing it the matching hash item and the
1734 * private @data. The bucket lock is held during the callback, so the
1735 * callback must never sleep (an illustrative callback follows this function).
1736 */
1737 void
1738 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1739 cfs_hash_for_each_cb_t func, void *data) {
1740 struct hlist_node *hnode;
1741 struct cfs_hash_bd bds[2];
1742 unsigned i;
1743
1744 cfs_hash_lock(hs, 0);
1745
1746 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1747
1748 cfs_hash_for_each_bd(bds, 2, i) {
1749 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1750
1751 hlist_for_each(hnode, hlist) {
1752 cfs_hash_bucket_validate(hs, &bds[i], hnode);
1753
1754 if (cfs_hash_keycmp(hs, key, hnode)) {
1755 if (func(hs, &bds[i], hnode, data))
1756 break;
1757 }
1758 }
1759 }
1760
1761 cfs_hash_dual_bd_unlock(hs, bds, 0);
1762 cfs_hash_unlock(hs, 0);
1763 }
1764 EXPORT_SYMBOL(cfs_hash_for_each_key);
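/*
 * Illustrative usage sketch, kept under "#if 0": a non-sleeping callback
 * suitable for cfs_hash_for_each_key().  example_match_cb() and the
 * counter passed via @data are invented names; since the bucket lock is
 * held while the callback runs, it only updates a counter.
 */
#if 0
static int
example_match_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		 struct hlist_node *hnode, void *data)
{
	unsigned int *nmatched = data;

	(*nmatched)++;		/* must not sleep: bucket lock is held */
	return 0;		/* keep scanning the chain for this key */
}

/* typical call site:
 *	unsigned int n = 0;
 *
 *	cfs_hash_for_each_key(hs, key, example_match_cb, &n);
 */
#endif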
1765
1766 /**
1767 * Rehash the libcfs hash @hs to a new size. This can be used to grow
1768 * the hash when excessive chaining is detected, or to shrink it when
1769 * it is larger than needed. When the CFS_HASH_REHASH flag is set in
1770 * @hs the libcfs hash may be dynamically rehashed during addition or
1771 * removal if the hash's theta value falls below hs->hs_min_theta or
1772 * exceeds hs->hs_max_theta. By default these values are tuned to keep
1773 * the chained hash depth small, assuming a reasonably uniform hashing
1774 * function. The theta thresholds for @hs are tunable via
1775 * cfs_hash_set_theta(); a caller sketch follows cfs_hash_rehash() below.
1776 */
1777 void
1778 cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1779 {
1780 int i;
1781
1782 /* caller must hold cfs_hash_lock(hs, 1) */
1783 LASSERT(cfs_hash_with_rehash(hs) &&
1784 !cfs_hash_with_no_lock(hs));
1785
1786 if (!cfs_hash_is_rehashing(hs))
1787 return;
1788
1789 if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1790 hs->hs_rehash_bits = 0;
1791 return;
1792 }
1793
1794 for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1795 cfs_hash_unlock(hs, 1);
1796 /* raise a console warning if we have been waiting too long */
1797 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1798 "hash %s is still rehashing, rescheduled %d\n",
1799 hs->hs_name, i - 1);
1800 cond_resched();
1801 cfs_hash_lock(hs, 1);
1802 }
1803 }
1804 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1805
1806 void
1807 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1808 {
1809 cfs_hash_lock(hs, 1);
1810 cfs_hash_rehash_cancel_locked(hs);
1811 cfs_hash_unlock(hs, 1);
1812 }
1813 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1814
1815 int
1816 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1817 {
1818 int rc;
1819
1820 LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1821
1822 cfs_hash_lock(hs, 1);
1823
1824 rc = cfs_hash_rehash_bits(hs);
1825 if (rc <= 0) {
1826 cfs_hash_unlock(hs, 1);
1827 return rc;
1828 }
1829
1830 hs->hs_rehash_bits = rc;
1831 if (!do_rehash) {
1832 /* launch and return */
1833 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1834 cfs_hash_unlock(hs, 1);
1835 return 0;
1836 }
1837
1838 /* rehash right now */
1839 cfs_hash_unlock(hs, 1);
1840
1841 return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1842 }
1843 EXPORT_SYMBOL(cfs_hash_rehash);
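/*
 * Illustrative usage sketch, kept under "#if 0": how a caller might drive
 * cfs_hash_rehash().  With @do_rehash == 0 the resize is only scheduled on
 * cfs_sched_rehash and the call returns immediately; with a non-zero
 * @do_rehash the resize runs in the caller's context.  example_resize()
 * is an invented name.
 */
#if 0
static void
example_resize(struct cfs_hash *hs, int in_caller_context)
{
	int rc;

	/* returns without resizing when no rehash is currently needed */
	rc = cfs_hash_rehash(hs, in_caller_context);
	if (rc < 0)
		CDEBUG(D_INFO, "hash %s not resized: %d\n", hs->hs_name, rc);
}
#endif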
1844
1845 static int
1846 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1847 {
1848 struct cfs_hash_bd new;
1849 struct hlist_head *hhead;
1850 struct hlist_node *hnode;
1851 struct hlist_node *pos;
1852 void *key;
1853 int c = 0;
1854
1855 /* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
1856 cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1857 hlist_for_each_safe(hnode, pos, hhead) {
1858 key = cfs_hash_key(hs, hnode);
1859 LASSERT(key != NULL);
1860 /* Validate hnode is in the correct bucket. */
1861 cfs_hash_bucket_validate(hs, old, hnode);
1862 /*
1863 * Delete from old hash bucket; move to new bucket.
1864 * ops->hs_key must be defined.
1865 */
1866 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1867 hs->hs_rehash_bits, key, &new);
1868 cfs_hash_bd_move_locked(hs, old, &new, hnode);
1869 c++;
1870 }
1871 }
1872
1873 return c;
1874 }
1875
1876 static int
1877 cfs_hash_rehash_worker(cfs_workitem_t *wi)
1878 {
1879 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
1880 struct cfs_hash_bucket **bkts;
1881 struct cfs_hash_bd bd;
1882 unsigned int old_size;
1883 unsigned int new_size;
1884 int bsize;
1885 int count = 0;
1886 int rc = 0;
1887 int i;
1888
1889 LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1890
1891 cfs_hash_lock(hs, 0);
1892 LASSERT(cfs_hash_is_rehashing(hs));
1893
1894 old_size = CFS_HASH_NBKT(hs);
1895 new_size = CFS_HASH_RH_NBKT(hs);
1896
1897 cfs_hash_unlock(hs, 0);
1898
1899 /*
1900 * don't need hs::hs_rwlock for hs::hs_buckets,
1901 * because nobody can change bkt-table except me.
1902 */
1903 bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1904 old_size, new_size);
1905 cfs_hash_lock(hs, 1);
1906 if (bkts == NULL) {
1907 rc = -ENOMEM;
1908 goto out;
1909 }
1910
1911 if (bkts == hs->hs_buckets) {
1912 bkts = NULL; /* do nothing */
1913 goto out;
1914 }
1915
1916 rc = __cfs_hash_theta(hs);
1917 if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1918 /* free the newly allocated bkt-table */
1919 old_size = new_size;
1920 new_size = CFS_HASH_NBKT(hs);
1921 rc = -EALREADY;
1922 goto out;
1923 }
1924
1925 LASSERT(hs->hs_rehash_buckets == NULL);
1926 hs->hs_rehash_buckets = bkts;
1927
1928 rc = 0;
1929 cfs_hash_for_each_bucket(hs, &bd, i) {
1930 if (cfs_hash_is_exiting(hs)) {
1931 rc = -ESRCH;
1932 /* someone wants to destroy the hash, abort now */
1933 if (old_size < new_size) /* OK to free old bkt-table */
1934 break;
1935 /* it's shrinking, so we must free the new bkt-table */
1936 hs->hs_rehash_buckets = NULL;
1937 old_size = new_size;
1938 new_size = CFS_HASH_NBKT(hs);
1939 goto out;
1940 }
1941
1942 count += cfs_hash_rehash_bd(hs, &bd);
1943 if (count < CFS_HASH_LOOP_HOG ||
1944 cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1945 continue;
1946 }
1947
1948 count = 0;
1949 cfs_hash_unlock(hs, 1);
1950 cond_resched();
1951 cfs_hash_lock(hs, 1);
1952 }
1953
1954 hs->hs_rehash_count++;
1955
1956 bkts = hs->hs_buckets;
1957 hs->hs_buckets = hs->hs_rehash_buckets;
1958 hs->hs_rehash_buckets = NULL;
1959
1960 hs->hs_cur_bits = hs->hs_rehash_bits;
1961 out:
1962 hs->hs_rehash_bits = 0;
1963 if (rc == -ESRCH) /* never to be scheduled again */
1964 cfs_wi_exit(cfs_sched_rehash, wi);
1965 bsize = cfs_hash_bkt_size(hs);
1966 cfs_hash_unlock(hs, 1);
1967 /* can't refer to @hs anymore because it could be destroyed */
1968 if (bkts != NULL)
1969 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1970 if (rc != 0)
1971 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1972 /* return 1 only if cfs_wi_exit is called */
1973 return rc == -ESRCH;
1974 }
1975
1976 /**
1977 * Rehash the object referenced by @hnode in the libcfs hash @hs. The
1978 * @old_key must be provided to locate the object's previous location
1979 * in the hash, and the @new_key will be used to reinsert the object.
1980 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1981 * combo when it is critical that there is no window in time where the
1982 * object is missing from the hash. When an object is being rehashed
1983 * the registered cfs_hash_get() and cfs_hash_put() functions will
1984 * not be called (an illustrative caller is sketched after this function).
1985 */
1986 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
1987 void *new_key, struct hlist_node *hnode)
1988 {
1989 struct cfs_hash_bd bds[3];
1990 struct cfs_hash_bd old_bds[2];
1991 struct cfs_hash_bd new_bd;
1992
1993 LASSERT(!hlist_unhashed(hnode));
1994
1995 cfs_hash_lock(hs, 0);
1996
1997 cfs_hash_dual_bd_get(hs, old_key, old_bds);
1998 cfs_hash_bd_get(hs, new_key, &new_bd);
1999
2000 bds[0] = old_bds[0];
2001 bds[1] = old_bds[1];
2002 bds[2] = new_bd;
2003
2004 /* NB: bds[0] and bds[1] are ordered already */
2005 cfs_hash_bd_order(&bds[1], &bds[2]);
2006 cfs_hash_bd_order(&bds[0], &bds[1]);
2007
2008 cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2009 if (likely(old_bds[1].bd_bucket == NULL)) {
2010 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2011 } else {
2012 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2013 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2014 }
2015 /* overwrite the key while the locks are held, otherwise this may
2016 * race with other operations, e.g. rehash */
2017 cfs_hash_keycpy(hs, new_key, hnode);
2018
2019 cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2020 cfs_hash_unlock(hs, 0);
2021 }
2022 EXPORT_SYMBOL(cfs_hash_rehash_key);
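/*
 * Illustrative usage sketch, kept under "#if 0": moving an object to its
 * new position when its key changes, with no window where it is absent
 * from the hash.  struct example_obj, its fields and example_change_key()
 * are invented; it is assumed the hash registers an hs_keycpy operation
 * that stores the new key into the object while the bucket locks are held.
 */
#if 0
struct example_obj {
	__u64			eo_id;		/* the hashed key */
	struct hlist_node	eo_hnode;	/* linkage into the hash */
};

static void
example_change_key(struct cfs_hash *hs, struct example_obj *obj,
		   __u64 new_id)
{
	__u64 old_id = obj->eo_id;

	/* both old and new buckets are locked inside; hs_get/hs_put are
	 * not called while the object is moved */
	cfs_hash_rehash_key(hs, &old_id, &new_id, &obj->eo_hnode);
}
#endif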
2023
2024 void cfs_hash_debug_header(struct seq_file *m)
2025 {
2026 seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n",
2027 CFS_HASH_BIGNAME_LEN, "name");
2028 }
2029 EXPORT_SYMBOL(cfs_hash_debug_header);
2030
2031 static struct cfs_hash_bucket **
2032 cfs_hash_full_bkts(struct cfs_hash *hs)
2033 {
2034 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2035 if (hs->hs_rehash_buckets == NULL)
2036 return hs->hs_buckets;
2037
2038 LASSERT(hs->hs_rehash_bits != 0);
2039 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2040 hs->hs_rehash_buckets : hs->hs_buckets;
2041 }
2042
2043 static unsigned int
2044 cfs_hash_full_nbkt(struct cfs_hash *hs)
2045 {
2046 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2047 if (hs->hs_rehash_buckets == NULL)
2048 return CFS_HASH_NBKT(hs);
2049
2050 LASSERT(hs->hs_rehash_bits != 0);
2051 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2052 CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2053 }
2054
2055 void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2056 {
2057 int dist[8] = { 0, };
2058 int maxdep = -1;
2059 int maxdepb = -1;
2060 int total = 0;
2061 int theta;
2062 int i;
2063
2064 cfs_hash_lock(hs, 0);
2065 theta = __cfs_hash_theta(hs);
2066
2067 seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
2068 CFS_HASH_BIGNAME_LEN, hs->hs_name,
2069 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2070 1 << hs->hs_max_bits,
2071 __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2072 __cfs_hash_theta_int(hs->hs_min_theta),
2073 __cfs_hash_theta_frac(hs->hs_min_theta),
2074 __cfs_hash_theta_int(hs->hs_max_theta),
2075 __cfs_hash_theta_frac(hs->hs_max_theta),
2076 hs->hs_flags, hs->hs_rehash_count);
2077
2078 /*
2079 * The distribution is a summary of the chained hash depth in
2080 * each of the libcfs hash buckets. Each bucket's hsb_count is
2081 * divided by the hash theta value and used to generate a
2082 * histogram of the hash distribution. A uniform hash will
2083 * result in all hash buckets being close to the average, thus
2084 * only the first few entries in the histogram will be non-zero.
2085 * A non-uniform hash function shows up as outlier buckets in the
2086 * histogram (a seq_file usage sketch follows this function).
2087 *
2088 * Uniform hash distribution: 128/128/0/0/0/0/0/0
2089 * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
2090 */
2091 for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2092 struct cfs_hash_bd bd;
2093
2094 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2095 cfs_hash_bd_lock(hs, &bd, 0);
2096 if (maxdep < bd.bd_bucket->hsb_depmax) {
2097 maxdep = bd.bd_bucket->hsb_depmax;
2098 maxdepb = ffz(~maxdep);
2099 }
2100 total += bd.bd_bucket->hsb_count;
2101 dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2102 cfs_hash_bd_unlock(hs, &bd, 0);
2103 }
2104
2105 seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2106 for (i = 0; i < 8; i++)
2107 seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');
2108
2109 cfs_hash_unlock(hs, 0);
2110 }
2111 EXPORT_SYMBOL(cfs_hash_debug_str);
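/*
 * Illustrative usage sketch, kept under "#if 0": emitting the statistics
 * above from a seq_file "show" handler.  example_hash_seq_show() is an
 * invented name, and recovering the hash from m->private is an assumption
 * of the example.
 */
#if 0
static int
example_hash_seq_show(struct seq_file *m, void *v)
{
	struct cfs_hash *hs = m->private;

	cfs_hash_debug_header(m);	/* column headers */
	cfs_hash_debug_str(hs, m);	/* one row of stats + distribution */
	return 0;
}
#endif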
2112