Lines matching refs: b — references to the local/argument b (struct btree *)
127 struct btree *b = container_of(keys, struct btree, keys); in bch_bkey_dump() local
135 size_t n = PTR_BUCKET_NR(b->c, k, j); in bch_bkey_dump()
138 if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets) in bch_bkey_dump()
140 PTR_BUCKET(b->c, k, j)->prio); in bch_bkey_dump()
143 printk(" %s\n", bch_ptr_status(b->c, k)); in bch_bkey_dump()
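The fragments above (lines 127-143) come from bch_bkey_dump(), which walks every pointer in a key and prints the bucket it points into, dereferencing the bucket only when its number falls inside the cache device's valid range. A minimal sketch of that loop, reconstructed around the fragments (the printk format strings are assumptions, not necessarily the verbatim source):

    void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
    {
            struct btree *b = container_of(keys, struct btree, keys);
            unsigned int j;

            for (j = 0; j < KEY_PTRS(k); j++) {
                    size_t n = PTR_BUCKET_NR(b->c, k, j);

                    printk(" bucket %zu", n);

                    /* Only buckets in [first_bucket, nbuckets) exist on the
                     * cache device; anything else must not be dereferenced. */
                    if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
                            printk(" prio %i",
                                   PTR_BUCKET(b->c, k, j)->prio);
            }

            printk(" %s\n", bch_ptr_status(b->c, k));
    }

The range check matters because a corrupt key can encode an out-of-range bucket number; calling PTR_BUCKET() on such a key would index past the buckets array.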
167 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_invalid() local
168 return __bch_btree_ptr_invalid(b->c, k); in bch_btree_ptr_invalid()
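This callback, like bch_extent_invalid() at line 499 below, uses a single idiom: the bkey ops callbacks receive a struct btree_keys *, and container_of() recovers the enclosing struct btree so that the cache_set at b->c becomes reachable. Reconstructed from the two fragments (this is essentially the whole function):

    static bool bch_btree_ptr_invalid(struct btree_keys *bk,
                                      const struct bkey *k)
    {
            /* The btree_keys is embedded in struct btree as .keys, so
             * container_of() walks back to the containing node. */
            struct btree *b = container_of(bk, struct btree, keys);

            return __bch_btree_ptr_invalid(b->c, k);
    }

container_of() only subtracts the member offset; it is valid here because every struct btree_keys handed to these callbacks is embedded in a struct btree.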
171 static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) in btree_ptr_bad_expensive() argument
177 if (mutex_trylock(&b->c->bucket_lock)) { in btree_ptr_bad_expensive()
179 if (ptr_available(b->c, k, i)) { in btree_ptr_bad_expensive()
180 g = PTR_BUCKET(b->c, k, i); in btree_ptr_bad_expensive()
184 (b->c->gc_mark_valid && in btree_ptr_bad_expensive()
189 mutex_unlock(&b->c->bucket_lock); in btree_ptr_bad_expensive()
194 mutex_unlock(&b->c->bucket_lock); in btree_ptr_bad_expensive()
196 btree_bug(b, in btree_ptr_bad_expensive()
198 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), in btree_ptr_bad_expensive()
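btree_ptr_bad_expensive() (lines 171-198) is the debug-only deep check: it cross-checks the key against the buckets it points at, but only when bucket_lock can be taken with mutex_trylock(), so the check never blocks a fast path. A sketch of the control flow, reconstructed from the fragments; the exact mark condition and message text are assumptions:

    if (mutex_trylock(&b->c->bucket_lock)) {
            for (i = 0; i < KEY_PTRS(k); i++)
                    if (ptr_available(b->c, k, i)) {
                            g = PTR_BUCKET(b->c, k, i);

                            /* Assumption: a btree pointer must land in a
                             * bucket that garbage collection has marked
                             * as metadata (once marks are valid). */
                            if (KEY_DIRTY(k) ||
                                (b->c->gc_mark_valid &&
                                 GC_MARK(g) != GC_MARK_METADATA))
                                    goto err;
                    }

            mutex_unlock(&b->c->bucket_lock);
    }

    return false;
    err:
            mutex_unlock(&b->c->bucket_lock);
            btree_bug(b, "inconsistent btree pointer %s: bucket %zi pin %i",
                      buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin));
            return true;

Note the asymmetric unlock paths (fragments 189 and 194): the success path unlocks inside the trylock branch, while the failure path unlocks at the error label before reporting via btree_bug().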
205 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_bad() local
214 if (!ptr_available(b->c, k, i) || in bch_btree_ptr_bad()
215 ptr_stale(b->c, k, i)) in bch_btree_ptr_bad()
218 if (expensive_debug_checks(b->c) && in bch_btree_ptr_bad()
219 btree_ptr_bad_expensive(b, k)) in bch_btree_ptr_bad()
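bch_btree_ptr_bad() (lines 205-219) layers its checks by cost: per-pointer availability and staleness tests are cheap and always run; the bucket cross-check above runs only when expensive_debug_checks() is enabled for the cache set. A sketch, with the initial guard conditions reconstructed as assumptions:

    static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
    {
            struct btree *b = container_of(bk, struct btree, keys);
            unsigned int i;

            /* Assumption: zero keys, pointerless keys, and structurally
             * invalid keys are rejected up front. */
            if (!bkey_cmp(k, &ZERO_KEY) ||
                !KEY_PTRS(k) ||
                bch_ptr_invalid(bk, k))
                    return true;

            for (i = 0; i < KEY_PTRS(k); i++)
                    if (!ptr_available(b->c, k, i) ||
                        ptr_stale(b->c, k, i))
                            return true;

            if (expensive_debug_checks(b->c) &&
                btree_ptr_bad_expensive(b, k))
                    return true;

            return false;
    }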
230 struct btree *b = container_of(bk, struct btree, keys); in bch_btree_ptr_insert_fixup() local
233 btree_current_write(b)->prio_blocked++; in bch_btree_ptr_insert_fixup()
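The bch_btree_ptr_insert_fixup() fragment (line 233) bumps prio_blocked on the node's current write. In bcache this holds back writing out bucket priorities until the btree write that makes the new pointer durable completes; otherwise a crash could leave on-disk priorities describing a bucket owned by a node pointer that never reached disk. A sketch of the visible logic (any guard condition around the increment is not visible in the fragments and is deliberately omitted):

    struct btree *b = container_of(bk, struct btree, keys);

    /* Hold bucket priorities back until this btree node write lands. */
    btree_current_write(b)->prio_blocked++;

    return false;  /* assumption: btree pointers never need extent fixup */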
321 static bool bch_extent_insert_fixup(struct btree_keys *b, in bch_extent_insert_fixup() argument
326 struct cache_set *c = container_of(b, struct btree, keys)->c; in bch_extent_insert_fixup()
407 if (bkey_written(b, k)) { in bch_extent_insert_fixup()
420 top = bch_bset_search(b, bset_tree_last(b), in bch_extent_insert_fixup()
422 bch_bset_insert(b, top, k); in bch_extent_insert_fixup()
426 bch_bset_insert(b, k, &temp.key); in bch_extent_insert_fixup()
432 bch_bset_fix_invalidated_key(b, k); in bch_extent_insert_fixup()
442 if (bkey_written(b, k) && in bch_extent_insert_fixup()
451 bch_bset_fix_invalidated_key(b, k); in bch_extent_insert_fixup()
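Fragments 321-451 are from bch_extent_insert_fixup(), the overlap-resolution path. The interesting case is a new extent landing in the middle of an existing key k: a written key (already on disk) is immutable, so a second key is inserted to stand in for the top half of the split, whereas an unwritten key is duplicated directly in the in-memory bset; either way the surviving halves are trimmed and bch_bset_fix_invalidated_key() repairs the auxiliary search tree, which was built against k's old boundaries. A sketch of that branch, reconstructed around fragments 407-432 (bch_cut_front()/bch_cut_back() are real bcache helpers; the exact ordering is a best-effort reconstruction, not the verbatim source):

    if (bkey_written(b, k)) {
            /* k is on disk and immutable: insert a duplicate of k,
             * positioned after the new extent, as the top half. */
            top = bch_bset_search(b, bset_tree_last(b), insert);
            bch_bset_insert(b, top, k);
    } else {
            /* k is only in memory: copy it and insert the copy next
             * to it, then treat the original as the top half. */
            BKEY_PADDED(key) temp;

            bkey_copy(&temp.key, k);
            bch_bset_insert(b, k, &temp.key);
            top = bkey_next(k);
    }

    bch_cut_front(insert, top);          /* top half starts after insert  */
    bch_cut_back(&START_KEY(insert), k); /* bottom half ends before it    */
    bch_bset_fix_invalidated_key(b, k);  /* k's end (its search key) moved:
                                          * repair the lookup tree         */

Fragments 442-451 show the partial-overlap case making the same distinction: a written key that is completely overwritten can be emptied without touching the search tree, while trimming an unwritten key again requires bch_bset_fix_invalidated_key().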
499 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_invalid() local
500 return __bch_extent_invalid(b->c, k); in bch_extent_invalid()
503 static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, in bch_extent_bad_expensive() argument
506 struct bucket *g = PTR_BUCKET(b->c, k, ptr); in bch_extent_bad_expensive()
509 if (mutex_trylock(&b->c->bucket_lock)) { in bch_extent_bad_expensive()
510 if (b->c->gc_mark_valid && in bch_extent_bad_expensive()
519 mutex_unlock(&b->c->bucket_lock); in bch_extent_bad_expensive()
524 mutex_unlock(&b->c->bucket_lock); in bch_extent_bad_expensive()
526 btree_bug(b, in bch_extent_bad_expensive()
528 buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin), in bch_extent_bad_expensive()
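bch_extent_bad_expensive() (lines 503-528) is the extent-side twin of the check at line 171, but it validates a single pointer: under a trylocked bucket_lock it compares the bucket's GC mark against the key's dirty bit, and on mismatch unlocks and reports through btree_bug() with the bucket number and pin count (fragment 528). A sketch; the precise mark conditions are assumptions:

    struct bucket *g = PTR_BUCKET(b->c, k, ptr);

    if (mutex_trylock(&b->c->bucket_lock)) {
            /* Assumption: once GC marks are valid, no extent may sit in a
             * metadata bucket, and a dirty extent must sit in a bucket
             * marked dirty. */
            if (b->c->gc_mark_valid &&
                (!GC_MARK(g) ||
                 GC_MARK(g) == GC_MARK_METADATA ||
                 (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
                    goto err;

            mutex_unlock(&b->c->bucket_lock);
    }

    return false;
    err:
            mutex_unlock(&b->c->bucket_lock);
            btree_bug(b, "inconsistent extent pointer %s: bucket %zu pin %i",
                      buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin));
            return true;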
535 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_bad() local
544 if (!ptr_available(b->c, k, i)) in bch_extent_bad()
547 if (!expensive_debug_checks(b->c) && KEY_DIRTY(k)) in bch_extent_bad()
551 g = PTR_BUCKET(b->c, k, i); in bch_extent_bad()
552 stale = ptr_stale(b->c, k, i); in bch_extent_bad()
554 btree_bug_on(stale > 96, b, in bch_extent_bad()
556 stale, b->c->need_gc); in bch_extent_bad()
559 b, "stale dirty pointer"); in bch_extent_bad()
564 if (expensive_debug_checks(b->c) && in bch_extent_bad()
565 bch_extent_bad_expensive(b, k, i)) in bch_extent_bad()
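bch_extent_bad() (lines 535-565) orders its per-pointer checks from cheap to expensive: an unavailable pointer makes the key bad immediately (fragment 544); after that, dirty keys are trusted unless expensive checks are on (fragment 547), since a dirty pointer pins its bucket and cannot legitimately go stale; finally staleness is measured as a generation delta, guarded by two debug assertions. A sketch of the second loop, reconstructed from fragments 551-565 (the format strings are close to, but not guaranteed to be, the verbatim source):

    for (i = 0; i < KEY_PTRS(k); i++) {
            g = PTR_BUCKET(b->c, k, i);
            stale = ptr_stale(b->c, k, i);

            /* Bucket gens are 8 bits; a delta over 96 points at corruption
             * or a missed garbage-collection pass, not normal aging. */
            btree_bug_on(stale > 96, b,
                         "key too stale: %i, need_gc %u",
                         stale, b->c->need_gc);

            /* A dirty pointer pins its bucket, so it must never be stale. */
            btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
                         b, "stale dirty pointer");

            if (stale)
                    return true;

            if (expensive_debug_checks(b->c) &&
                bch_extent_bad_expensive(b, k, i))
                    return true;
    }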
580 struct btree *b = container_of(bk, struct btree, keys); in bch_extent_merge() local
583 if (key_merging_disabled(b->c)) in bch_extent_merge()
588 PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) in bch_extent_merge()
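bch_extent_merge() (lines 580-588) gates merging twice: key_merging_disabled() is a per-cache-set debug knob that switches merging off entirely, and two extents may only merge when each pointer pair is physically contiguous, which fragment 588 enforces in part by requiring both pointers to land in the same bucket (an extent's data never spans buckets). A sketch of the gating logic; the pointer-contiguity arithmetic is an assumption:

    static bool bch_extent_merge(struct btree_keys *bk,
                                 struct bkey *l, struct bkey *r)
    {
            struct btree *b = container_of(bk, struct btree, keys);
            unsigned int i;

            if (key_merging_disabled(b->c))
                    return false;

            for (i = 0; i < KEY_PTRS(l); i++)
                    /* Assumption: l's pointer advanced by l's size must be
                     * exactly r's pointer, and both must stay in the same
                     * bucket. */
                    if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
                        PTR_BUCKET_NR(b->c, l, i) !=
                        PTR_BUCKET_NR(b->c, r, i))
                            return false;

            /* ... merge proceeds: the sizes are summed into l, capped at
             * the maximum KEY_SIZE; details elided here. */
            return true;
    }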