Lines matching refs: b

150 static inline bool btree_node_ ## flag(struct btree *b)			\
151 { return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
153 static inline void set_btree_node_ ## flag(struct btree *b) \
154 { set_bit(BTREE_NODE_ ## flag, &b->flags); } \
166 static inline struct btree_write *btree_current_write(struct btree *b) in btree_current_write() argument
168 return b->writes + btree_node_write_idx(b); in btree_current_write()
171 static inline struct btree_write *btree_prev_write(struct btree *b) in btree_prev_write() argument
173 return b->writes + (btree_node_write_idx(b) ^ 1); in btree_prev_write()
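btree_current_write() and btree_prev_write() select between the two entries of b->writes; XORing the write index with 1 yields the other slot, which is how the node's write state is double-buffered. A standalone sketch of that idiom, with the index kept as a plain field rather than the BTREE_NODE_write_idx flag bit the header actually uses:

#include <stdio.h>

struct btree_write { unsigned nkeys; };		/* placeholder payload */

struct btree {
	unsigned write_idx;			/* 0 or 1, stand-in for the flag bit */
	struct btree_write writes[2];
};

static struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + b->write_idx;
}

static struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (b->write_idx ^ 1);	/* the other slot */
}

int main(void)
{
	struct btree b = { .write_idx = 1 };

	btree_current_write(&b)->nkeys = 10;
	btree_prev_write(&b)->nkeys = 3;
	printf("current=%u prev=%u\n",
	       btree_current_write(&b)->nkeys, btree_prev_write(&b)->nkeys);
	return 0;
}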
176 static inline struct bset *btree_bset_first(struct btree *b) in btree_bset_first() argument
178 return b->keys.set->data; in btree_bset_first()
181 static inline struct bset *btree_bset_last(struct btree *b) in btree_bset_last() argument
183 return bset_tree_last(&b->keys)->data; in btree_bset_last()
186 static inline unsigned bset_block_offset(struct btree *b, struct bset *i) in bset_block_offset() argument
188 return bset_sector_offset(&b->keys, i) >> b->c->block_bits; in bset_block_offset()
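bset_block_offset() converts a bset's sector offset within the node into a block offset by shifting right by the cache set's block_bits. A worked example with assumed numbers (not taken from this listing): with 512-byte sectors and 4 KiB blocks, block_bits is 3, so a bset starting 24 sectors in lands at block 24 >> 3 = 3.

#include <stdio.h>

int main(void)
{
	unsigned block_bits = 3;	/* assumed: 4 KiB blocks of 512-byte sectors */
	unsigned sector_offset = 24;	/* hypothetical bset_sector_offset() result */

	/* Same conversion as bset_block_offset(): sectors >> block_bits. */
	printf("block offset = %u\n", sector_offset >> block_bits);	/* prints 3 */
	return 0;
}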
200 #define for_each_cached_btree(b, c, iter) \ argument
204 hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
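Only the inner hlist walk of for_each_cached_btree() matches here; the full macro (not shown in this listing) also loops iter over every bucket of (c)->bucket_hash, and because the chains are walked with hlist_for_each_entry_rcu(), callers hold rcu_read_lock() around it. A plain-C sketch of that two-level shape, with a fixed-size bucket array and singly linked chains standing in for the hlist and RCU machinery:

#include <stdio.h>

#define HASH_BUCKETS 4

struct btree {
	struct btree *hash_next;	/* stand-in for the hlist linkage */
	int id;
};

struct cache_set {
	struct btree *bucket_hash[HASH_BUCKETS];
};

/* Two-level iteration: every bucket, then every node chained in it. */
#define for_each_cached_btree(b, c, iter)				\
	for ((iter) = 0; (iter) < HASH_BUCKETS; (iter)++)		\
		for ((b) = (c)->bucket_hash[(iter)]; (b); (b) = (b)->hash_next)

int main(void)
{
	struct btree n1 = { .id = 1 }, n2 = { .id = 2 };
	struct cache_set c = { .bucket_hash = { &n1, NULL, &n2, NULL } };
	struct btree *b;
	unsigned iter;

	for_each_cached_btree(b, &c, iter)
		printf("cached node %d in bucket %u\n", b->id, iter);
	return 0;
}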
225 static inline void rw_lock(bool w, struct btree *b, int level) in rw_lock() argument
227 w ? down_write_nested(&b->lock, level + 1) in rw_lock()
228 : down_read_nested(&b->lock, level + 1); in rw_lock()
230 b->seq++; in rw_lock()
233 static inline void rw_unlock(bool w, struct btree *b) in rw_unlock() argument
236 b->seq++; in rw_unlock()
237 (w ? up_write : up_read)(&b->lock); in rw_unlock()
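rw_lock() takes b->lock as either a reader or a writer, and the *_nested() variants pass level + 1 as the lockdep subclass so acquiring a child node's lock while holding the parent's does not trip a false deadlock warning. The b->seq++ lines bump the node's sequence number around write-side use (only lines referencing b appear in this listing, so any surrounding conditionals are elided); code that drops and retakes locks can remember seq and re-check it later to detect that the node was modified or reused in the meantime. A userspace sketch of that revalidation idea, with illustrative names rather than the header's:

#include <stdbool.h>
#include <stdio.h>

struct btree {
	unsigned seq;	/* bumped by write-side locking, as in rw_lock()/rw_unlock() */
};

/* Writer side: the increments that bracket a modification. */
static void node_write_lock(struct btree *b)   { b->seq++; }
static void node_write_unlock(struct btree *b) { b->seq++; }

/* Revalidation: compare a remembered seq to detect intervening writers. */
static bool node_unchanged(struct btree *b, unsigned remembered_seq)
{
	return b->seq == remembered_seq;
}

int main(void)
{
	struct btree b = { .seq = 0 };
	unsigned seq = b.seq;		/* remembered before dropping our lock */

	node_write_lock(&b);		/* some other path modifies the node */
	node_write_unlock(&b);

	printf("node unchanged: %s\n", node_unchanged(&b, seq) ? "yes" : "no");
	return 0;
}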