Lines matching references to ce

98 #define	MB_CACHE_ENTRY_LOCK_INDEX(ce)			\  argument
99 (hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))
133 __spin_lock_mb_cache_entry(struct mb_cache_entry *ce) in __spin_lock_mb_cache_entry() argument
136 MB_CACHE_ENTRY_LOCK_INDEX(ce))); in __spin_lock_mb_cache_entry()
140 __spin_unlock_mb_cache_entry(struct mb_cache_entry *ce) in __spin_unlock_mb_cache_entry() argument
143 MB_CACHE_ENTRY_LOCK_INDEX(ce))); in __spin_unlock_mb_cache_entry()
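Taken together, lines 98-143 implement per-entry locking: the entry's address is hashed to a small index which selects one spinlock out of a fixed pool, so unrelated entries rarely contend. A minimal sketch assembled from these fragments; only the index macro and the two wrappers appear above, so the lock pool's name, its size and the MB_CACHE_ENTRY_LOCK_BITS value are assumptions.

/* Sketch only. Assumes <linux/hash.h>, <linux/spinlock.h> and the
 * struct mb_cache_entry definition from the surrounding file. */
#define MB_CACHE_ENTRY_LOCK_BITS	4		/* assumed value */
#define MB_CACHE_ENTRY_LOCK_COUNT	(1 << MB_CACHE_ENTRY_LOCK_BITS)
#define	MB_CACHE_ENTRY_LOCK_INDEX(ce)			\
	(hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))

static spinlock_t mb_cache_entry_locks[MB_CACHE_ENTRY_LOCK_COUNT];	/* assumed name */

static inline void
__spin_lock_mb_cache_entry(struct mb_cache_entry *ce)
{
	/* Hash the entry's address to choose one lock from the pool. */
	spin_lock(&mb_cache_entry_locks[MB_CACHE_ENTRY_LOCK_INDEX(ce)]);
}

static inline void
__spin_unlock_mb_cache_entry(struct mb_cache_entry *ce)
{
	spin_unlock(&mb_cache_entry_locks[MB_CACHE_ENTRY_LOCK_INDEX(ce)]);
}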
147 __mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce) in __mb_cache_entry_is_block_hashed() argument
149 return !hlist_bl_unhashed(&ce->e_block_list); in __mb_cache_entry_is_block_hashed()
154 __mb_cache_entry_unhash_block(struct mb_cache_entry *ce) in __mb_cache_entry_unhash_block() argument
156 if (__mb_cache_entry_is_block_hashed(ce)) in __mb_cache_entry_unhash_block()
157 hlist_bl_del_init(&ce->e_block_list); in __mb_cache_entry_unhash_block()
161 __mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce) in __mb_cache_entry_is_index_hashed() argument
163 return !hlist_bl_unhashed(&ce->e_index.o_list); in __mb_cache_entry_is_index_hashed()
167 __mb_cache_entry_unhash_index(struct mb_cache_entry *ce) in __mb_cache_entry_unhash_index() argument
169 if (__mb_cache_entry_is_index_hashed(ce)) in __mb_cache_entry_unhash_index()
170 hlist_bl_del_init(&ce->e_index.o_list); in __mb_cache_entry_unhash_index()
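Lines 147-170 are simple state helpers: an entry counts as hashed when its node sits on a block or index bucket list, and unhashing is a conditional hlist_bl_del_init(). Reassembled as a sketch (return types assumed):

static inline int
__mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce)
{
	/* On a block-hash bucket list iff the node is not self-linked. */
	return !hlist_bl_unhashed(&ce->e_block_list);
}

static inline void
__mb_cache_entry_unhash_block(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_block_hashed(ce))
		hlist_bl_del_init(&ce->e_block_list);
}

static inline int
__mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce)
{
	return !hlist_bl_unhashed(&ce->e_index.o_list);
}

static inline void
__mb_cache_entry_unhash_index(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_index_hashed(ce))
		hlist_bl_del_init(&ce->e_index.o_list);
}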
182 __mb_cache_entry_unhash_unlock(struct mb_cache_entry *ce) in __mb_cache_entry_unhash_unlock() argument
184 __mb_cache_entry_unhash_index(ce); in __mb_cache_entry_unhash_unlock()
185 hlist_bl_unlock(ce->e_index_hash_p); in __mb_cache_entry_unhash_unlock()
186 __mb_cache_entry_unhash_block(ce); in __mb_cache_entry_unhash_unlock()
187 hlist_bl_unlock(ce->e_block_hash_p); in __mb_cache_entry_unhash_unlock()
191 __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask) in __mb_cache_entry_forget() argument
193 struct mb_cache *cache = ce->e_cache; in __mb_cache_entry_forget()
195 mb_assert(!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))); in __mb_cache_entry_forget()
196 kmem_cache_free(cache->c_entry_cache, ce); in __mb_cache_entry_forget()
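Lines 182-196 cover teardown: __mb_cache_entry_unhash_unlock() removes the entry from both hash chains and drops the two bit-locked bucket heads (the caller is assumed to have taken both with hlist_bl_lock()), and __mb_cache_entry_forget() frees an entry nothing references any more. A sketch:

static inline void
__mb_cache_entry_unhash_unlock(struct mb_cache_entry *ce)
{
	/* Both bucket heads are assumed locked by the caller. */
	__mb_cache_entry_unhash_index(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	__mb_cache_entry_unhash_block(ce);
	hlist_bl_unlock(ce->e_block_hash_p);
}

static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	/* Nobody may still hold or wait for the entry at this point. */
	mb_assert(!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt)));
	kmem_cache_free(cache->c_entry_cache, ce);
	/* The fragments above do not show it, but the full function
	 * presumably also drops the cache's entry count here. */
}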
201 __mb_cache_entry_release(struct mb_cache_entry *ce) in __mb_cache_entry_release() argument
204 __spin_lock_mb_cache_entry(ce); in __mb_cache_entry_release()
206 if (ce->e_queued) in __mb_cache_entry_release()
208 if (ce->e_used >= MB_CACHE_WRITER) in __mb_cache_entry_release()
209 ce->e_used -= MB_CACHE_WRITER; in __mb_cache_entry_release()
214 ce->e_used--; in __mb_cache_entry_release()
215 if (!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))) { in __mb_cache_entry_release()
216 if (!__mb_cache_entry_is_block_hashed(ce)) { in __mb_cache_entry_release()
217 __spin_unlock_mb_cache_entry(ce); in __mb_cache_entry_release()
225 if (list_empty(&ce->e_lru_list)) in __mb_cache_entry_release()
226 list_add_tail(&ce->e_lru_list, &mb_cache_lru_list); in __mb_cache_entry_release()
229 __spin_unlock_mb_cache_entry(ce); in __mb_cache_entry_release()
232 mb_assert(list_empty(&ce->e_lru_list)); in __mb_cache_entry_release()
233 __mb_cache_entry_forget(ce, GFP_KERNEL); in __mb_cache_entry_release()
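Lines 201-233 give the release path: take the entry lock, wake any waiters, drop the MB_CACHE_WRITER share if held, decrement e_used, then either park the now-idle entry on the global LRU (if it is still block-hashed) or free it outright. A sketch of that flow; the wait queue and the spinlock guarding mb_cache_lru_list do not appear in the listing, so their names below are assumptions.

/* Assumed globals (not shown in the listing): a wait queue for blocked
 * get/find callers and a spinlock guarding mb_cache_lru_list. */
static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
static DEFINE_SPINLOCK(mb_cache_spinlock);

static void
__mb_cache_entry_release(struct mb_cache_entry *ce)
{
	__spin_lock_mb_cache_entry(ce);
	/* Wake anyone queued in mb_cache_entry_get()/__mb_cache_entry_find(). */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);		/* assumed name */
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;		/* drop the writer share */
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))) {
		if (!__mb_cache_entry_is_block_hashed(ce)) {
			/* Already unhashed: nobody can find it, free it. */
			__spin_unlock_mb_cache_entry(ce);
			goto forget;
		}
		/* Idle but still hashed: park it on the global LRU. */
		spin_lock(&mb_cache_spinlock);		/* assumed name */
		if (list_empty(&ce->e_lru_list))
			list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	}
	__spin_unlock_mb_cache_entry(ce);
	return;
forget:
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}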
259 struct mb_cache_entry *ce = in mb_cache_shrink_scan() local
262 list_del_init(&ce->e_lru_list); in mb_cache_shrink_scan()
263 if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt)) in mb_cache_shrink_scan()
267 hlist_bl_lock(ce->e_block_hash_p); in mb_cache_shrink_scan()
268 hlist_bl_lock(ce->e_index_hash_p); in mb_cache_shrink_scan()
270 if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt) || in mb_cache_shrink_scan()
271 !list_empty(&ce->e_lru_list)) { in mb_cache_shrink_scan()
272 hlist_bl_unlock(ce->e_index_hash_p); in mb_cache_shrink_scan()
273 hlist_bl_unlock(ce->e_block_hash_p); in mb_cache_shrink_scan()
277 __mb_cache_entry_unhash_unlock(ce); in mb_cache_shrink_scan()
278 list_add_tail(&ce->e_lru_list, &free_list); in mb_cache_shrink_scan()
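Lines 259-278 are the core of the memory-pressure scan: entries are pulled off the global LRU, skipped if busy, and re-checked after both bucket locks are taken, because another CPU may have looked the entry up or re-queued it in the meantime. A sketch of that loop, assuming the usual shrinker scan_objects signature and the assumed mb_cache_spinlock from the release sketch above:

static unsigned long
mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *entry, *tmp;
	int nr_to_scan = sc->nr_to_scan;
	unsigned long freed = 0;

	spin_lock(&mb_cache_spinlock);			/* assumed LRU lock */
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_del_init(&ce->e_lru_list);
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))
			continue;			/* busy, leave it to its holder */
		spin_unlock(&mb_cache_spinlock);
		/* Lock both buckets, then re-check: the entry may have been
		 * found and reused while the LRU lock was dropped. */
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt) ||
		    !list_empty(&ce->e_lru_list)) {
			hlist_bl_unlock(ce->e_index_hash_p);
			hlist_bl_unlock(ce->e_block_hash_p);
			spin_lock(&mb_cache_spinlock);
			continue;
		}
		__mb_cache_entry_unhash_unlock(ce);
		list_add_tail(&ce->e_lru_list, &free_list);
		spin_lock(&mb_cache_spinlock);
	}
	spin_unlock(&mb_cache_spinlock);

	/* Free everything collected above, outside all locks. */
	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, sc->gfp_mask);
		freed++;
	}
	return freed;
}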
400 struct mb_cache_entry *ce, *tmp; in mb_cache_shrink() local
406 ce = list_entry(l, struct mb_cache_entry, e_lru_list); in mb_cache_shrink()
407 if (ce->e_bdev == bdev) { in mb_cache_shrink()
408 list_del_init(&ce->e_lru_list); in mb_cache_shrink()
409 if (ce->e_used || ce->e_queued || in mb_cache_shrink()
410 atomic_read(&ce->e_refcnt)) in mb_cache_shrink()
416 hlist_bl_lock(ce->e_block_hash_p); in mb_cache_shrink()
417 hlist_bl_lock(ce->e_index_hash_p); in mb_cache_shrink()
419 if (ce->e_used || ce->e_queued || in mb_cache_shrink()
420 atomic_read(&ce->e_refcnt) || in mb_cache_shrink()
421 !list_empty(&ce->e_lru_list)) { in mb_cache_shrink()
422 hlist_bl_unlock(ce->e_index_hash_p); in mb_cache_shrink()
423 hlist_bl_unlock(ce->e_block_hash_p); in mb_cache_shrink()
428 __mb_cache_entry_unhash_unlock(ce); in mb_cache_shrink()
429 mb_assert(!(ce->e_used || ce->e_queued || in mb_cache_shrink()
430 atomic_read(&ce->e_refcnt))); in mb_cache_shrink()
431 list_add_tail(&ce->e_lru_list, &free_list); in mb_cache_shrink()
438 list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) { in mb_cache_shrink()
439 __mb_cache_entry_forget(ce, GFP_KERNEL); in mb_cache_shrink()
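Lines 400-439 show mb_cache_shrink(), which evicts every unused entry belonging to one block device. It uses the same double-check pattern as the shrinker: unlink from the LRU, take both bucket locks, re-verify that the entry is still idle and still off the LRU, then unhash it and collect it for freeing. A sketch; the in-tree loop keeps a cursor across the LRU, while the sketch simply restarts from the head whenever the LRU lock has been dropped, which is simpler but equivalent.

void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *ce, *tmp;

	spin_lock(&mb_cache_spinlock);			/* assumed LRU lock */
restart:
	list_for_each_entry(ce, &mb_cache_lru_list, e_lru_list) {
		if (ce->e_bdev != bdev)
			continue;
		list_del_init(&ce->e_lru_list);
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))
			goto restart;		/* busy: its holder will deal with it */
		spin_unlock(&mb_cache_spinlock);
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		/* Re-check: a find/get may have grabbed the entry meanwhile. */
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt) ||
		    !list_empty(&ce->e_lru_list)) {
			hlist_bl_unlock(ce->e_index_hash_p);
			hlist_bl_unlock(ce->e_block_hash_p);
		} else {
			__mb_cache_entry_unhash_unlock(ce);
			mb_assert(!(ce->e_used || ce->e_queued ||
				    atomic_read(&ce->e_refcnt)));
			list_add_tail(&ce->e_lru_list, &free_list);
		}
		spin_lock(&mb_cache_spinlock);
		goto restart;		/* LRU may have changed while unlocked */
	}
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list)
		__mb_cache_entry_forget(ce, GFP_KERNEL);
}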
455 struct mb_cache_entry *ce, *tmp; in mb_cache_destroy() local
458 list_for_each_entry_safe(ce, tmp, &mb_cache_lru_list, e_lru_list) { in mb_cache_destroy()
459 if (ce->e_cache == cache) in mb_cache_destroy()
460 list_move_tail(&ce->e_lru_list, &free_list); in mb_cache_destroy()
465 list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) { in mb_cache_destroy()
466 list_del_init(&ce->e_lru_list); in mb_cache_destroy()
470 hlist_bl_lock(ce->e_block_hash_p); in mb_cache_destroy()
471 hlist_bl_lock(ce->e_index_hash_p); in mb_cache_destroy()
472 mb_assert(!(ce->e_used || ce->e_queued || in mb_cache_destroy()
473 atomic_read(&ce->e_refcnt))); in mb_cache_destroy()
474 __mb_cache_entry_unhash_unlock(ce); in mb_cache_destroy()
475 __mb_cache_entry_forget(ce, GFP_KERNEL); in mb_cache_destroy()
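Lines 455-475 cover mb_cache_destroy(): every LRU entry belonging to the cache being torn down is moved onto a private list, then unhashed and freed; nothing may still be using the cache at this point, hence the hard assertion. A sketch (the LRU spinlock and the final kmem_cache/hash-table teardown are assumed, as they do not appear in the listing):

void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *ce, *tmp;

	spin_lock(&mb_cache_spinlock);			/* assumed LRU lock */
	list_for_each_entry_safe(ce, tmp, &mb_cache_lru_list, e_lru_list) {
		if (ce->e_cache == cache)
			list_move_tail(&ce->e_lru_list, &free_list);
	}
	spin_unlock(&mb_cache_spinlock);

	list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
		list_del_init(&ce->e_lru_list);
		/* Take both bucket locks in the same order as elsewhere. */
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		mb_assert(!(ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt)));
		__mb_cache_entry_unhash_unlock(ce);
		__mb_cache_entry_forget(ce, GFP_KERNEL);
	}

	/* Assumed remainder, not shown in the listing: free the two hash
	 * tables, destroy cache->c_entry_cache and free the cache itself. */
}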
504 struct mb_cache_entry *ce; in mb_cache_entry_alloc() local
513 ce = list_entry(l, struct mb_cache_entry, e_lru_list); in mb_cache_entry_alloc()
514 if (ce->e_cache == cache) { in mb_cache_entry_alloc()
515 list_del_init(&ce->e_lru_list); in mb_cache_entry_alloc()
516 if (ce->e_used || ce->e_queued || in mb_cache_entry_alloc()
517 atomic_read(&ce->e_refcnt)) in mb_cache_entry_alloc()
524 hlist_bl_lock(ce->e_block_hash_p); in mb_cache_entry_alloc()
525 hlist_bl_lock(ce->e_index_hash_p); in mb_cache_entry_alloc()
527 if (ce->e_used || ce->e_queued || in mb_cache_entry_alloc()
528 atomic_read(&ce->e_refcnt) || in mb_cache_entry_alloc()
529 !list_empty(&ce->e_lru_list)) { in mb_cache_entry_alloc()
530 hlist_bl_unlock(ce->e_index_hash_p); in mb_cache_entry_alloc()
531 hlist_bl_unlock(ce->e_block_hash_p); in mb_cache_entry_alloc()
536 mb_assert(list_empty(&ce->e_lru_list)); in mb_cache_entry_alloc()
537 mb_assert(!(ce->e_used || ce->e_queued || in mb_cache_entry_alloc()
538 atomic_read(&ce->e_refcnt))); in mb_cache_entry_alloc()
539 __mb_cache_entry_unhash_unlock(ce); in mb_cache_entry_alloc()
546 ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags); in mb_cache_entry_alloc()
547 if (!ce) in mb_cache_entry_alloc()
550 INIT_LIST_HEAD(&ce->e_lru_list); in mb_cache_entry_alloc()
551 INIT_HLIST_BL_NODE(&ce->e_block_list); in mb_cache_entry_alloc()
552 INIT_HLIST_BL_NODE(&ce->e_index.o_list); in mb_cache_entry_alloc()
553 ce->e_cache = cache; in mb_cache_entry_alloc()
554 ce->e_queued = 0; in mb_cache_entry_alloc()
555 atomic_set(&ce->e_refcnt, 0); in mb_cache_entry_alloc()
557 ce->e_block_hash_p = &cache->c_block_hash[0]; in mb_cache_entry_alloc()
558 ce->e_index_hash_p = &cache->c_index_hash[0]; in mb_cache_entry_alloc()
559 ce->e_used = 1 + MB_CACHE_WRITER; in mb_cache_entry_alloc()
560 return ce; in mb_cache_entry_alloc()
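Lines 504-560 show the two halves of mb_cache_entry_alloc(): first try to recycle an idle entry of this cache from the LRU (with the same lock-then-recheck dance as the shrink paths), falling back to kmem_cache_alloc() otherwise; either way the entry is returned held for writing (e_used = 1 + MB_CACHE_WRITER) with its hash pointers parked on bucket 0 until mb_cache_entry_insert() places it. A sketch; the LRU spinlock name and the exact retry structure are assumptions.

struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce;

	/* Try to recycle an idle entry of this cache from the LRU first. */
	spin_lock(&mb_cache_spinlock);			/* assumed LRU lock */
	list_for_each_entry(ce, &mb_cache_lru_list, e_lru_list) {
		if (ce->e_cache != cache)
			continue;
		list_del_init(&ce->e_lru_list);
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))
			break;				/* busy: give up, allocate */
		spin_unlock(&mb_cache_spinlock);
		hlist_bl_lock(ce->e_block_hash_p);
		hlist_bl_lock(ce->e_index_hash_p);
		/* Re-check now that the bucket locks are held. */
		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt) ||
		    !list_empty(&ce->e_lru_list)) {
			hlist_bl_unlock(ce->e_index_hash_p);
			hlist_bl_unlock(ce->e_block_hash_p);
			goto alloc;
		}
		mb_assert(list_empty(&ce->e_lru_list));
		mb_assert(!(ce->e_used || ce->e_queued ||
			    atomic_read(&ce->e_refcnt)));
		__mb_cache_entry_unhash_unlock(ce);
		goto found;
	}
	spin_unlock(&mb_cache_spinlock);
alloc:
	ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
	if (!ce)
		return NULL;
	INIT_LIST_HEAD(&ce->e_lru_list);
	INIT_HLIST_BL_NODE(&ce->e_block_list);
	INIT_HLIST_BL_NODE(&ce->e_index.o_list);
	ce->e_cache = cache;
	ce->e_queued = 0;
	atomic_set(&ce->e_refcnt, 0);
found:
	/* Park the hash pointers somewhere valid until the entry is
	 * inserted, and hand it back held for writing. */
	ce->e_block_hash_p = &cache->c_block_hash[0];
	ce->e_index_hash_p = &cache->c_index_hash[0];
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}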
579 mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev, in mb_cache_entry_insert() argument
582 struct mb_cache *cache = ce->e_cache; in mb_cache_entry_insert()
589 mb_assert(ce); in mb_cache_entry_insert()
600 mb_assert(!__mb_cache_entry_is_block_hashed(ce)); in mb_cache_entry_insert()
601 __mb_cache_entry_unhash_block(ce); in mb_cache_entry_insert()
602 __mb_cache_entry_unhash_index(ce); in mb_cache_entry_insert()
603 ce->e_bdev = bdev; in mb_cache_entry_insert()
604 ce->e_block = block; in mb_cache_entry_insert()
605 ce->e_block_hash_p = block_hash_p; in mb_cache_entry_insert()
606 ce->e_index.o_key = key; in mb_cache_entry_insert()
607 hlist_bl_add_head(&ce->e_block_list, block_hash_p); in mb_cache_entry_insert()
612 ce->e_index_hash_p = index_hash_p; in mb_cache_entry_insert()
613 hlist_bl_add_head(&ce->e_index.o_list, index_hash_p); in mb_cache_entry_insert()
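Lines 579-613 hash the (bdev, block) pair and the index key into their respective bucket heads and link the entry onto both chains, block chain first, each under its own bit lock. A sketch; the parameter types, the bucket computation (c_bucket_bits) and the duplicate check the full function presumably performs on the block chain are assumptions:

int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	struct hlist_bl_head *block_hash_p, *index_hash_p;
	unsigned int bucket;

	mb_assert(ce);

	/* Assumed bucket computation; only the bucket arrays appear above. */
	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	block_hash_p = &cache->c_block_hash[bucket];
	hlist_bl_lock(block_hash_p);
	/* (The full function is expected to reject a duplicate
	 *  (bdev, block) entry here.) */
	mb_assert(!__mb_cache_entry_is_block_hashed(ce));
	__mb_cache_entry_unhash_block(ce);
	__mb_cache_entry_unhash_index(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	ce->e_block_hash_p = block_hash_p;
	ce->e_index.o_key = key;
	hlist_bl_add_head(&ce->e_block_list, block_hash_p);
	hlist_bl_unlock(block_hash_p);

	bucket = hash_long(key, cache->c_bucket_bits);	/* assumed */
	index_hash_p = &cache->c_index_hash[bucket];
	hlist_bl_lock(index_hash_p);
	ce->e_index_hash_p = index_hash_p;
	hlist_bl_add_head(&ce->e_index.o_list, index_hash_p);
	hlist_bl_unlock(index_hash_p);
	return 0;
}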
627 mb_cache_entry_release(struct mb_cache_entry *ce) in mb_cache_entry_release() argument
629 __mb_cache_entry_release(ce); in mb_cache_entry_release()
638 mb_cache_entry_free(struct mb_cache_entry *ce) in mb_cache_entry_free() argument
640 mb_assert(ce); in mb_cache_entry_free()
641 mb_assert(list_empty(&ce->e_lru_list)); in mb_cache_entry_free()
642 hlist_bl_lock(ce->e_index_hash_p); in mb_cache_entry_free()
643 __mb_cache_entry_unhash_index(ce); in mb_cache_entry_free()
644 hlist_bl_unlock(ce->e_index_hash_p); in mb_cache_entry_free()
645 hlist_bl_lock(ce->e_block_hash_p); in mb_cache_entry_free()
646 __mb_cache_entry_unhash_block(ce); in mb_cache_entry_free()
647 hlist_bl_unlock(ce->e_block_hash_p); in mb_cache_entry_free()
648 __mb_cache_entry_release(ce); in mb_cache_entry_free()
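Lines 627-648 are the two public release entry points: mb_cache_entry_release() simply drops the caller's hold, while mb_cache_entry_free() first unhashes the entry from both chains so it can never be found again, then releases it. Every line here appears in the fragments, so the sketch is nearly verbatim:

void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	__mb_cache_entry_release(ce);
}

void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	mb_assert(ce);
	mb_assert(list_empty(&ce->e_lru_list));
	/* Unhash from the index chain, then the block chain, each under
	 * its own bit lock, so concurrent lookups cannot find the entry. */
	hlist_bl_lock(ce->e_index_hash_p);
	__mb_cache_entry_unhash_index(ce);
	hlist_bl_unlock(ce->e_index_hash_p);
	hlist_bl_lock(ce->e_block_hash_p);
	__mb_cache_entry_unhash_block(ce);
	hlist_bl_unlock(ce->e_block_hash_p);
	__mb_cache_entry_release(ce);
}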
666 struct mb_cache_entry *ce; in mb_cache_entry_get() local
674 hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) { in mb_cache_entry_get()
675 mb_assert(ce->e_block_hash_p == block_hash_p); in mb_cache_entry_get()
676 if (ce->e_bdev == bdev && ce->e_block == block) { in mb_cache_entry_get()
680 atomic_inc(&ce->e_refcnt); in mb_cache_entry_get()
682 __spin_lock_mb_cache_entry(ce); in mb_cache_entry_get()
683 atomic_dec(&ce->e_refcnt); in mb_cache_entry_get()
684 if (ce->e_used > 0) { in mb_cache_entry_get()
686 while (ce->e_used > 0) { in mb_cache_entry_get()
687 ce->e_queued++; in mb_cache_entry_get()
690 __spin_unlock_mb_cache_entry(ce); in mb_cache_entry_get()
692 __spin_lock_mb_cache_entry(ce); in mb_cache_entry_get()
693 ce->e_queued--; in mb_cache_entry_get()
697 ce->e_used += 1 + MB_CACHE_WRITER; in mb_cache_entry_get()
698 __spin_unlock_mb_cache_entry(ce); in mb_cache_entry_get()
700 if (!list_empty(&ce->e_lru_list)) { in mb_cache_entry_get()
702 list_del_init(&ce->e_lru_list); in mb_cache_entry_get()
705 if (!__mb_cache_entry_is_block_hashed(ce)) { in mb_cache_entry_get()
706 __mb_cache_entry_release(ce); in mb_cache_entry_get()
709 return ce; in mb_cache_entry_get()
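Lines 666-709 are the by-block lookup. The reference protocol is worth spelling out: e_refcnt is bumped while the bucket lock is still held so a concurrent free cannot reclaim the entry, then the per-entry spinlock is taken, the temporary refcount is dropped, and the caller sleeps until any current user releases the entry before taking it exclusively as 1 + MB_CACHE_WRITER. A sketch; the wait queue, the LRU spinlock and the bucket computation are assumed names:

struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
					cache->c_bucket_bits);	/* assumed */
	struct hlist_bl_head *block_hash_p = &cache->c_block_hash[bucket];
	struct hlist_bl_node *l;
	struct mb_cache_entry *ce;

	hlist_bl_lock(block_hash_p);
	hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
		mb_assert(ce->e_block_hash_p == block_hash_p);
		if (ce->e_bdev != bdev || ce->e_block != block)
			continue;
		/* Pin the entry before dropping the bucket lock. */
		atomic_inc(&ce->e_refcnt);
		hlist_bl_unlock(block_hash_p);
		__spin_lock_mb_cache_entry(ce);
		atomic_dec(&ce->e_refcnt);
		/* Wait until no reader or writer holds the entry. */
		while (ce->e_used > 0) {
			DEFINE_WAIT(wait);
			ce->e_queued++;
			prepare_to_wait(&mb_cache_queue, &wait,	/* assumed */
					TASK_UNINTERRUPTIBLE);
			__spin_unlock_mb_cache_entry(ce);
			schedule();
			__spin_lock_mb_cache_entry(ce);
			ce->e_queued--;
			finish_wait(&mb_cache_queue, &wait);
		}
		ce->e_used += 1 + MB_CACHE_WRITER;	/* exclusive hold */
		__spin_unlock_mb_cache_entry(ce);

		if (!list_empty(&ce->e_lru_list)) {
			spin_lock(&mb_cache_spinlock);	/* assumed */
			list_del_init(&ce->e_lru_list);
			spin_unlock(&mb_cache_spinlock);
		}
		/* The entry may have been unhashed while we slept. */
		if (!__mb_cache_entry_is_block_hashed(ce)) {
			__mb_cache_entry_release(ce);
			return NULL;
		}
		return ce;
	}
	hlist_bl_unlock(block_hash_p);
	return NULL;
}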
725 struct mb_cache_entry *ce = in __mb_cache_entry_find() local
728 mb_assert(ce->e_index_hash_p == head); in __mb_cache_entry_find()
729 if (ce->e_bdev == bdev && ce->e_index.o_key == key) { in __mb_cache_entry_find()
733 atomic_inc(&ce->e_refcnt); in __mb_cache_entry_find()
735 __spin_lock_mb_cache_entry(ce); in __mb_cache_entry_find()
736 atomic_dec(&ce->e_refcnt); in __mb_cache_entry_find()
737 ce->e_used++; in __mb_cache_entry_find()
740 if (ce->e_used >= MB_CACHE_WRITER) { in __mb_cache_entry_find()
743 while (ce->e_used >= MB_CACHE_WRITER) { in __mb_cache_entry_find()
744 ce->e_queued++; in __mb_cache_entry_find()
747 __spin_unlock_mb_cache_entry(ce); in __mb_cache_entry_find()
749 __spin_lock_mb_cache_entry(ce); in __mb_cache_entry_find()
750 ce->e_queued--; in __mb_cache_entry_find()
754 __spin_unlock_mb_cache_entry(ce); in __mb_cache_entry_find()
755 if (!list_empty(&ce->e_lru_list)) { in __mb_cache_entry_find()
757 list_del_init(&ce->e_lru_list); in __mb_cache_entry_find()
760 if (!__mb_cache_entry_is_block_hashed(ce)) { in __mb_cache_entry_find()
761 __mb_cache_entry_release(ce); in __mb_cache_entry_find()
764 return ce; in __mb_cache_entry_find()
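Lines 725-764 are the shared worker behind the index lookups. It is entered with the index bucket lock held and walks the chain from the given node; on a match it pins the entry via e_refcnt, drops the bucket lock, and takes a shared (reader) hold by incrementing e_used, sleeping only while a writer (e_used >= MB_CACHE_WRITER) holds the entry. A sketch using the same assumed names as above; the not-found and raced-with-free return conventions are also assumptions.

static struct mb_cache_entry *
__mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head,
		      struct block_device *bdev, unsigned int key)
{
	/* Caller holds the bit lock on 'head'. */
	while (l != NULL) {
		struct mb_cache_entry *ce =
			hlist_bl_entry(l, struct mb_cache_entry,
				       e_index.o_list);
		mb_assert(ce->e_index_hash_p == head);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			atomic_inc(&ce->e_refcnt);	/* pin before unlock */
			hlist_bl_unlock(head);
			__spin_lock_mb_cache_entry(ce);
			atomic_dec(&ce->e_refcnt);
			ce->e_used++;			/* take a reader hold */
			/* Block only while a writer holds the entry. */
			while (ce->e_used >= MB_CACHE_WRITER) {
				DEFINE_WAIT(wait);
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,	/* assumed */
						TASK_UNINTERRUPTIBLE);
				__spin_unlock_mb_cache_entry(ce);
				schedule();
				__spin_lock_mb_cache_entry(ce);
				ce->e_queued--;
				finish_wait(&mb_cache_queue, &wait);
			}
			__spin_unlock_mb_cache_entry(ce);
			if (!list_empty(&ce->e_lru_list)) {
				spin_lock(&mb_cache_spinlock);	/* assumed */
				list_del_init(&ce->e_lru_list);
				spin_unlock(&mb_cache_spinlock);
			}
			if (!__mb_cache_entry_is_block_hashed(ce)) {
				/* Raced with a free; assumed error return. */
				__mb_cache_entry_release(ce);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	hlist_bl_unlock(head);	/* assumed: drop the lock when nothing matches */
	return NULL;
}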
791 struct mb_cache_entry *ce = NULL; in mb_cache_entry_find_first() local
798 ce = __mb_cache_entry_find(l, index_hash_p, bdev, key); in mb_cache_entry_find_first()
801 return ce; in mb_cache_entry_find_first()
830 struct mb_cache_entry *ce; in mb_cache_entry_find_next() local
838 ce = __mb_cache_entry_find(l, index_hash_p, bdev, key); in mb_cache_entry_find_next()
840 return ce; in mb_cache_entry_find_next()
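Lines 791-840 are the public index lookups built on that worker: find_first locks the key's index bucket and starts from its first node, find_next continues from the node after prev and drops the reference held on prev. A sketch, again with c_bucket_bits assumed for the bucket computation:

struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long((unsigned long)key,
					cache->c_bucket_bits);	/* assumed */
	struct hlist_bl_head *index_hash_p = &cache->c_index_hash[bucket];
	struct mb_cache_entry *ce = NULL;
	struct hlist_bl_node *l;

	hlist_bl_lock(index_hash_p);
	if (!hlist_bl_empty(index_hash_p)) {
		l = hlist_bl_first(index_hash_p);
		/* __mb_cache_entry_find() drops the bucket lock itself. */
		ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
	} else
		hlist_bl_unlock(index_hash_p);
	return ce;
}

struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long((unsigned long)key,
					cache->c_bucket_bits);	/* assumed */
	struct hlist_bl_head *index_hash_p = &cache->c_index_hash[bucket];
	struct mb_cache_entry *ce;
	struct hlist_bl_node *l;

	mb_assert(prev->e_index_hash_p == index_hash_p);
	hlist_bl_lock(index_hash_p);
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
	__mb_cache_entry_release(prev);	/* drop the hold on prev */
	return ce;
}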