Lines matching refs: c (drivers/md/bcache/extents.c)
41 int64_t c = bkey_cmp(l.k, r.k); in bch_key_sort_cmp() local
43 return c ? c > 0 : l.k < r.k; in bch_key_sort_cmp()
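The comparator above folds bkey_cmp()'s three-way result into the boolean ordering the btree iterator heap wants, breaking ties by the keys' addresses so the order stays total and deterministic across sorted sets. A minimal standalone sketch, with simplified stand-in types (the kernel's struct bkey and bkey_cmp() are richer):

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel types. */
struct bkey { uint64_t offset; };
struct iter_set { const struct bkey *k; };

/* Three-way comparison in the style of bkey_cmp(): <0, 0, >0. */
static int64_t cmp_keys(const struct bkey *l, const struct bkey *r)
{
	return l->offset < r->offset ? -1 : l->offset > r->offset;
}

/*
 * A nonzero comparison decides the order; on a tie, comparing the key
 * pointers themselves keeps the ordering total and deterministic.
 */
static bool key_sort_cmp(struct iter_set l, struct iter_set r)
{
	int64_t c = cmp_keys(l.k, r.k);

	return c ? c > 0 : l.k < r.k;
}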
46 static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) in __ptr_invalid() argument
51 if (ptr_available(c, k, i)) { in __ptr_invalid()
52 struct cache *ca = PTR_CACHE(c, k, i); in __ptr_invalid()
53 size_t bucket = PTR_BUCKET_NR(c, k, i); in __ptr_invalid()
54 size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); in __ptr_invalid()
56 if (KEY_SIZE(k) + r > c->sb.bucket_size || in __ptr_invalid()
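Per pointer, __ptr_invalid() computes the bucket number and the offset within that bucket, then rejects keys whose data would not fit. Only the bucket_size condition (line 56) survives in this listing; the two range checks on the bucket number below are assumptions, inferred from the parallel checks in bch_ptr_status(). A standalone sketch of the arithmetic:

#include <stdbool.h>
#include <stddef.h>

/* Simplified geometry standing in for c->sb / ca->sb. */
struct geometry { size_t bucket_size, first_bucket, nbuckets; };

/*
 * A pointer is invalid when the extent would spill past the end of its
 * bucket, or when the bucket number falls outside the device.
 */
static bool ptr_out_of_bounds(const struct geometry *g, size_t bucket,
			      size_t offset_in_bucket, size_t key_size)
{
	return key_size + offset_in_bucket > g->bucket_size ||
	       bucket < g->first_bucket ||	/* assumed check */
	       bucket >= g->nbuckets;		/* assumed check */
}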
67 static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) in bch_ptr_status() argument
72 if (ptr_available(c, k, i)) { in bch_ptr_status()
73 struct cache *ca = PTR_CACHE(c, k, i); in bch_ptr_status()
74 size_t bucket = PTR_BUCKET_NR(c, k, i); in bch_ptr_status()
75 size_t r = bucket_remainder(c, PTR_OFFSET(k, i)); in bch_ptr_status()
77 if (KEY_SIZE(k) + r > c->sb.bucket_size) in bch_ptr_status()
83 if (ptr_stale(c, k, i)) in bch_ptr_status()
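bch_ptr_status() runs the same geometry checks but maps each failure to a human-readable status string, and also reports pointers whose bucket generation has moved on (ptr_stale(), line 83). A sketch with the same stand-in geometry as above; the message strings are illustrative, not necessarily the kernel's exact wording:

#include <stdbool.h>
#include <stddef.h>

struct geometry { size_t bucket_size, first_bucket, nbuckets; };

static const char *ptr_status(const struct geometry *g, size_t bucket,
			      size_t offset_in_bucket, size_t key_size,
			      bool stale)
{
	if (key_size + offset_in_bucket > g->bucket_size)
		return "bad, length too big";
	if (bucket < g->first_bucket)
		return "bad, short offset";
	if (bucket >= g->nbuckets)
		return "bad, offset past end of device";
	if (stale)
		return "stale";
	return "";	/* empty string: nothing wrong to report */
}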
135 size_t n = PTR_BUCKET_NR(b->c, k, j); in bch_bkey_dump()
138 if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets) in bch_bkey_dump()
140 PTR_BUCKET(b->c, k, j)->prio); in bch_bkey_dump()
143 printk(" %s\n", bch_ptr_status(b->c, k)); in bch_bkey_dump()
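bch_bkey_dump() is careful about what it dereferences: the raw bucket number is printed unconditionally, but the bucket's priority is read only after the range check on line 138, since a corrupt key could point anywhere. A minimal sketch of the pattern, with printf() standing in for printk():

#include <stdio.h>
#include <stddef.h>

struct bucket { int prio; };
struct dev { size_t first_bucket, nbuckets; struct bucket *buckets; };

static void dump_ptr(const struct dev *d, size_t n)
{
	printf(" bucket %zu", n);

	/* Dereference bucket state only once n is known to be in range. */
	if (n >= d->first_bucket && n < d->nbuckets)
		printf(" prio %i", d->buckets[n].prio);
}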
148 bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k) in __bch_btree_ptr_invalid() argument
155 if (__ptr_invalid(c, k)) in __bch_btree_ptr_invalid()
161 cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k)); in __bch_btree_ptr_invalid()
168 return __bch_btree_ptr_invalid(b->c, k); in bch_btree_ptr_invalid()
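Lines 148-168 show the usual two-layer shape: __bch_btree_ptr_invalid() does the work against a cache_set, while bch_btree_ptr_invalid() is a thin wrapper that digs b->c out of the btree. A hedged reconstruction of the outer function from the fragments; the structural checks ahead of __ptr_invalid() and the bch_extent_to_text() helper are assumptions filled in from context, so the real source may differ:

bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	/* Assumed: a btree node pointer must carry pointers and a size,
	 * and is never dirty. */
	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);	/* assumed helper */
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}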
177 if (mutex_trylock(&b->c->bucket_lock)) { in btree_ptr_bad_expensive()
179 if (ptr_available(b->c, k, i)) { in btree_ptr_bad_expensive()
180 g = PTR_BUCKET(b->c, k, i); in btree_ptr_bad_expensive()
184 (b->c->gc_mark_valid && in btree_ptr_bad_expensive()
189 mutex_unlock(&b->c->bucket_lock); in btree_ptr_bad_expensive()
194 mutex_unlock(&b->c->bucket_lock); in btree_ptr_bad_expensive()
198 buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), in btree_ptr_bad_expensive()
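btree_ptr_bad_expensive() (lines 177-198) shows a debug-path locking idiom: mutex_trylock() instead of mutex_lock(), so when bucket_lock is contended the consistency check is simply skipped rather than risking a deadlock from an arbitrary call site, and both the success and failure paths unlock before returning. A standalone sketch of the idiom using POSIX threads:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Returns true only when the state was both checkable and inconsistent;
 * if the lock is busy, report "fine" instead of blocking a hot path.
 */
static bool check_expensive(bool (*inconsistent)(void))
{
	bool bad = false;

	if (pthread_mutex_trylock(&bucket_lock) == 0) {
		bad = inconsistent();
		pthread_mutex_unlock(&bucket_lock);
	}

	return bad;
}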
214 if (!ptr_available(b->c, k, i) || in bch_btree_ptr_bad()
215 ptr_stale(b->c, k, i)) in bch_btree_ptr_bad()
218 if (expensive_debug_checks(b->c) && in bch_btree_ptr_bad()
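bch_btree_ptr_bad() (lines 214-218) layers its checks by cost: the cheap per-pointer availability and staleness tests always run, and the bucket-level verification above runs only when expensive_debug_checks() is enabled. A hedged reconstruction using the identifiers from the listing; the loop bounds and any earlier structural checks are assumptions:

static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)	/* assumed loop */
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	/* Expensive bucket-state verification only under the debug knob. */
	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}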
259 int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k)); in bch_extent_sort_cmp() local
261 return c ? c > 0 : l.k < r.k; in bch_extent_sort_cmp()
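bch_extent_sort_cmp() has the same shape as bch_key_sort_cmp() but compares START_KEY(), so extents sort by where they begin and overlapping extents become adjacent in the merge. A bcache bkey records an extent's end offset and size, so the start is derived; a one-line sketch of that convention (field names are simplified stand-ins):

#include <stdint.h>

struct ekey { uint64_t offset, size; };	/* offset = end of the extent */

/* START_KEY()-style derivation: the extent covers [offset - size, offset). */
static uint64_t start_of(const struct ekey *k)
{
	return k->offset - k->size;
}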
312 struct cache_set *c, in bch_subtract_dirty() argument
317 bcache_dev_sectors_dirty_add(c, KEY_INODE(k), in bch_subtract_dirty()
326 struct cache_set *c = container_of(b, struct btree, keys)->c; in bch_extent_insert_fixup() local
404 bch_subtract_dirty(k, c, KEY_START(insert), in bch_extent_insert_fixup()
455 bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k)); in bch_extent_insert_fixup()
470 bcache_dev_sectors_dirty_add(c, KEY_INODE(insert), in bch_extent_insert_fixup()
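bch_subtract_dirty() (lines 312-317) centralizes the writeback accounting used throughout bch_extent_insert_fixup(): each time the insert overwrites part of an existing extent (lines 404 and 455) the overlapped sectors are subtracted from the per-device dirty count, and a dirty insert then adds its own sectors (line 470), keeping the total exact across partial overwrites. A standalone model of the bookkeeping; dev_sectors_dirty_add() and the flat counter are hypothetical stand-ins for bcache_dev_sectors_dirty_add():

#include <stdbool.h>
#include <stdint.h>

struct key { uint64_t inode; bool dirty; };

/* Hypothetical flat counter; the real code tracks per-device stripes. */
static int64_t dirty_sectors[16];

static void dev_sectors_dirty_add(uint64_t inode, uint64_t offset,
				  int64_t delta)
{
	(void)offset;
	dirty_sectors[inode] += delta;
}

/*
 * Clean keys carry no accounting; for dirty keys the overwritten range
 * shrinks the dirty count by exactly the overlap.
 */
static void subtract_dirty(const struct key *k, uint64_t offset,
			   int64_t sectors)
{
	if (k->dirty)
		dev_sectors_dirty_add(k->inode, offset, -sectors);
}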
477 bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k) in __bch_extent_invalid() argument
487 if (__ptr_invalid(c, k)) in __bch_extent_invalid()
493 cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k)); in __bch_extent_invalid()
500 return __bch_extent_invalid(b->c, k); in bch_extent_invalid()
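__bch_extent_invalid() mirrors __bch_btree_ptr_invalid() for data extents: shared geometry checks via __ptr_invalid(), then a cache_bug() report. One extent-specific precondition worth spelling out (an assumption from the START_KEY() arithmetic, not visible in this listing): since an extent's start is derived as offset minus size, a size larger than the offset would underflow and must be rejected first. Sketch:

#include <stdbool.h>
#include <stdint.h>

struct ekey { uint64_t offset, size; };

/* Reject sizes that would push the derived start below zero. */
static bool extent_size_invalid(const struct ekey *k)
{
	return k->size > k->offset;
}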
506 struct bucket *g = PTR_BUCKET(b->c, k, ptr); in bch_extent_bad_expensive()
509 if (mutex_trylock(&b->c->bucket_lock)) { in bch_extent_bad_expensive()
510 if (b->c->gc_mark_valid && in bch_extent_bad_expensive()
519 mutex_unlock(&b->c->bucket_lock); in bch_extent_bad_expensive()
524 mutex_unlock(&b->c->bucket_lock); in bch_extent_bad_expensive()
528 buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin), in bch_extent_bad_expensive()
544 if (!ptr_available(b->c, k, i)) in bch_extent_bad()
547 if (!expensive_debug_checks(b->c) && KEY_DIRTY(k)) in bch_extent_bad()
551 g = PTR_BUCKET(b->c, k, i); in bch_extent_bad()
552 stale = ptr_stale(b->c, k, i); in bch_extent_bad()
556 stale, b->c->need_gc); in bch_extent_bad()
564 if (expensive_debug_checks(b->c) && in bch_extent_bad()
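bch_extent_bad() (lines 544-564) treats staleness as the common failure mode: each pointer records the generation of the bucket it was allocated against, and ptr_stale() flags it once the bucket has been reused and its generation bumped. The shortcut on line 547 skips the scan for dirty keys outside debug builds, presumably because dirty data pins its buckets against reuse. A sketch of the wraparound-safe generation compare, modeled on bcache's 8-bit gen_after() arithmetic (an assumption):

#include <stdbool.h>
#include <stdint.h>

struct bucket { uint8_t gen; };

/* Wraparound-safe "a is newer than b" for 8-bit generation counters. */
static bool gen_after(uint8_t a, uint8_t b)
{
	return (int8_t)(a - b) > 0;
}

/* A pointer is stale once its bucket has been reallocated, i.e. the
 * bucket's generation has moved past the one the pointer carries. */
static bool ptr_is_stale(const struct bucket *g, uint8_t ptr_gen)
{
	return gen_after(g->gen, ptr_gen);
}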
583 if (key_merging_disabled(b->c)) in bch_extent_merge()
588 PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i)) in bch_extent_merge()
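bch_extent_merge() bails out entirely under the key_merging_disabled() debug knob (line 583); otherwise line 588 requires, pointer by pointer, that both extents live in the same bucket, and from context the left extent must also end exactly where the right one begins, since a merged extent must never span a bucket boundary. A standalone sketch of that predicate with one data pointer per key (simplified stand-ins):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ext { uint64_t dev_offset, size; };	/* one data pointer */

static size_t bucket_nr(uint64_t dev_offset, size_t bucket_size)
{
	return dev_offset / bucket_size;
}

/*
 * Mergeable only if r starts exactly at l's end on the cache device
 * (assumed from context) and the two pieces share a bucket (line 588).
 */
static bool extents_mergeable(const struct ext *l, const struct ext *r,
			      size_t bucket_size)
{
	return l->dev_offset + l->size == r->dev_offset &&
	       bucket_nr(l->dev_offset, bucket_size) ==
	       bucket_nr(r->dev_offset, bucket_size);
}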