Lines matching refs: b (uses of the struct btree_keys pointer b in the bcache bset code)

/* b->set[b->nsets] is the newest bset in the node; last_set_unwritten is
 * set while that bset has not yet been written out. */
static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
{
        return b->set + b->nsets;
}

static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
{
        return t <= b->set + b->nsets - b->last_set_unwritten;
}

static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
{
        return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}
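Taken together, b->set[0..b->nsets] are the sorted sets of a node, and only the last one may still accept keys while last_set_unwritten is set. A standalone sketch of the bset_written() comparison (the struct and field names below are simplified stand-ins, not the kernel types):

/* Standalone sketch of the nsets / last_set_unwritten bookkeeping. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SETS 4

struct node {
        unsigned nsets;                 /* index of the newest set */
        unsigned last_set_unwritten;    /* 1 while that set is still open */
        int set[MAX_SETS];              /* stand-in for struct bset_tree[] */
};

/* Mirrors bset_written(): everything before the open set is on disk. */
static bool set_written(struct node *n, int *t)
{
        return t <= n->set + n->nsets - n->last_set_unwritten;
}

int main(void)
{
        struct node n = { .nsets = 2, .last_set_unwritten = 1 };

        printf("set 1 written: %d\n", set_written(&n, &n.set[1]));     /* 1 */
        printf("set 2 written: %d\n", set_written(&n, &n.set[2]));     /* 0 */
        return 0;
}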
/* Offset of a bset within the node's contiguous key buffer, in bytes and
 * in 512-byte sectors. */
static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
{
        return ((size_t) i) - ((size_t) b->set->data);
}

static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
{
        return bset_byte_offset(b, i) >> 9;
}
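The sector offset is simply the byte offset shifted right by nine bits (512-byte sectors). A minimal user-space sketch of the same arithmetic, with a plain buffer standing in for b->set->data:

/* Standalone sketch mirroring bset_byte_offset()/bset_sector_offset(). */
#include <stddef.h>
#include <stdio.h>

int main(void)
{
        char buf[4096];                 /* stands in for b->set->data */
        char *i = buf + 1536;           /* stands in for a bset inside it */

        size_t byte_off   = (size_t) i - (size_t) buf;
        size_t sector_off = byte_off >> 9;      /* 512-byte sectors */

        printf("byte offset %zu = sector offset %zu\n", byte_off, sector_off);
        return 0;
}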
/* Space left for new keys in the last bset, in u64 units; zero once that
 * bset has been written out. */
static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON((PAGE_SIZE << b->page_order) <
               (bset_byte_offset(b, t->data) + set_bytes(t->data)));

        if (!b->last_set_unwritten)
                return 0;

        return ((PAGE_SIZE << b->page_order) -
                (bset_byte_offset(b, t->data) + set_bytes(t->data))) /
                sizeof(u64);
}

static inline struct bset *bset_next_set(struct btree_keys *b,
                                         unsigned block_bytes)
{
        struct bset *i = bset_tree_last(b)->data;

        return ((void *) i) + roundup(set_bytes(i), block_bytes);
}
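Remaining space is the node's buffer size (PAGE_SIZE << page_order) minus the end of the last bset, expressed in u64 key units. A standalone sketch of that arithmetic with made-up sizes (the variable names are stand-ins for the kernel fields):

/* Standalone sketch of the space calculation behind
 * bch_btree_keys_u64s_remaining(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        size_t page_size = 4096;
        unsigned page_order = 2;                /* buffer is 4 pages: 16 KiB */
        size_t buf_bytes = page_size << page_order;

        size_t last_set_offset = 12000;         /* byte offset of the open bset */
        size_t last_set_bytes = 1024;           /* bytes it currently occupies */
        int last_set_unwritten = 1;

        size_t remaining_u64s = 0;
        if (last_set_unwritten)
                remaining_u64s = (buf_bytes -
                                  (last_set_offset + last_set_bytes)) /
                                 sizeof(uint64_t);

        printf("%zu u64s of key space left\n", remaining_u64s);        /* 420 */
        return 0;
}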
/* The btree iterator keeps a back-pointer to the keys it is walking
 * (member of struct btree_iter, used by the debug checks): */
        struct btree_keys *b;

static inline struct bkey *bch_bset_search(struct btree_keys *b,
                                           struct bset_tree *t,
                                           const struct bkey *search)
{
        /* NULL means "first key"; otherwise binary-search the bset. */
        return search ? __bch_bset_search(b, t, search) : t->data->start;
}

/* Sorted walk over all keys in a node; the _filter variant skips keys
 * for which filter() returns true (e.g. bch_ptr_bad). */
#define for_each_key_filter(b, k, iter, filter)                         \
        for (bch_btree_iter_init((b), (iter), NULL);                    \
             ((k) = bch_btree_iter_next_filter((iter), (b), filter));)

#define for_each_key(b, k, iter)                                        \
        for (bch_btree_iter_init((b), (iter), NULL);                    \
             ((k) = bch_btree_iter_next(iter));)
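A usage sketch for the iteration macros (kernel context, assuming the declarations from this header are in scope; count_good_keys() itself is hypothetical, not a bcache function):

static unsigned count_good_keys(struct btree_keys *b)
{
        struct btree_iter iter;
        struct bkey *k;
        unsigned good = 0;

        /* Sorted walk across every bset; keys that bch_ptr_bad() flags
         * are skipped by the iterator itself. */
        for_each_key_filter(b, k, &iter, bch_ptr_bad)
                good++;

        return good;
}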
/* Sort/compact the whole node: merge every bset, starting from the first. */
static inline void bch_btree_sort(struct btree_keys *b,
                                  struct bset_sort_state *state)
{
        bch_btree_sort_partial(b, 0, state);
}
/* Thin wrappers that dispatch through the per-tree ops vtable, so btree
 * node keys and extent keys can share the bset code. */
static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
        return b->ops->key_invalid(b, k);
}

static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
{
        return b->ops->key_bad(b, k);
}

static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
                                    size_t size, const struct bkey *k)
{
        return b->ops->key_to_text(buf, size, k);
}
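These wrappers exist so the same bset code can serve different key types through b->ops. A sketch of the dispatch pattern with a hypothetical ops instance (only the member names key_invalid, key_bad and key_to_text are taken from the code above; the demo_* names and the rules they apply are made up, and bcache's real tables live elsewhere):

static bool demo_key_invalid(struct btree_keys *b, const struct bkey *k)
{
        return KEY_SIZE(k) == 0;        /* made-up validity rule */
}

static bool demo_key_bad(struct btree_keys *b, const struct bkey *k)
{
        return false;                   /* never treat keys as stale here */
}

static const struct btree_keys_ops demo_ops = {
        .key_invalid    = demo_key_invalid,
        .key_bad        = demo_key_bad,
        /* .key_to_text and the remaining hooks omitted */
};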
/* Stubs used when the debug checks are compiled out: */
static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {}

/* The expensive consistency checks are gated by a flag that can be
 * flipped at runtime: */
static inline bool btree_keys_expensive_checks(struct btree_keys *b)
{
        return *b->expensive_debug_checks;
}

static inline int bch_count_data(struct btree_keys *b)
{
        return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
}
#define bch_check_keys(b, ...)                                          \
do {                                                                    \
        if (btree_keys_expensive_checks(b))                             \
                __bch_check_keys(b, __VA_ARGS__);                       \
} while (0)
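Because expensive_debug_checks is a pointer to a flag rather than a compile-time constant, the costly verification can be toggled while the system runs, and the empty stubs above make it disappear entirely when debugging support is compiled out. A standalone sketch of that pointer-to-flag pattern (all names below are stand-ins, not the kernel ones):

/* Standalone sketch of the runtime-toggle gating pattern. */
#include <stdbool.h>
#include <stdio.h>

struct keys {
        bool *expensive_debug_checks;   /* points at a knob owned elsewhere */
};

static bool expensive_checks(struct keys *k)
{
        return *k->expensive_debug_checks;
}

int main(void)
{
        bool knob = false;
        struct keys k = { .expensive_debug_checks = &knob };

        printf("checks %s\n", expensive_checks(&k) ? "on" : "off");
        knob = true;                    /* flipped at runtime */
        printf("checks %s\n", expensive_checks(&k) ? "on" : "off");
        return 0;
}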