/linux-4.1.27/fs/hpfs/ |
D | anode.c |
  14   struct bplus_header *btree, unsigned sec,  in hpfs_bplus_lookup() argument
  23   if (bp_internal(btree)) {  in hpfs_bplus_lookup()
  24   for (i = 0; i < btree->n_used_nodes; i++)  in hpfs_bplus_lookup()
  25   if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {  in hpfs_bplus_lookup()
  26   a = le32_to_cpu(btree->u.internal[i].down);  in hpfs_bplus_lookup()
  29   btree = &anode->btree;  in hpfs_bplus_lookup()
  36   for (i = 0; i < btree->n_used_nodes; i++)  in hpfs_bplus_lookup()
  37   if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&  in hpfs_bplus_lookup()
  38   … le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {  in hpfs_bplus_lookup()
  39   …a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_sec…  in hpfs_bplus_lookup()
  [all …]
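The anode.c hits above sketch the two halves of hpfs_bplus_lookup(): internal nodes are scanned for the first down-pointer whose file_secno exceeds the requested sector, and the external (leaf) level is scanned for the extent that covers it. A minimal, self-contained sketch of that traversal under simplified structures; the real kernel code uses the on-disk hpfs types, little-endian accessors and buffer-head I/O, and load_node_fn here is a hypothetical callback standing in for hpfs_map_anode():

#include <stdint.h>

/* Simplified stand-ins for the on-disk HPFS entries (the kernel types use
 * little-endian fields and a union inside struct bplus_header). */
struct internal_entry { uint32_t file_secno; uint32_t down; };
struct external_entry { uint32_t file_secno; uint32_t length; uint32_t disk_secno; };

struct node {
	int internal;                 /* bp_internal() in the real code */
	unsigned n_used_nodes;
	struct internal_entry *in;    /* btree->u.internal              */
	struct external_entry *ex;    /* btree->u.external              */
};

/* Hypothetical callback standing in for hpfs_map_anode(). */
typedef struct node *(*load_node_fn)(uint32_t anode_no);

/* Return the disk sector backing file sector 'sec', or 0 if unmapped. */
static uint32_t bplus_lookup(struct node *btree, uint32_t sec, load_node_fn load)
{
	unsigned i;

	while (btree->internal) {
		/* descend at the first entry whose file_secno exceeds sec */
		for (i = 0; i < btree->n_used_nodes; i++)
			if (btree->in[i].file_secno > sec)
				break;
		if (i == btree->n_used_nodes)
			return 0;           /* tree is inconsistent */
		btree = load(btree->in[i].down);
		if (!btree)
			return 0;
	}

	/* leaf level: find the extent covering sec */
	for (i = 0; i < btree->n_used_nodes; i++)
		if (btree->ex[i].file_secno <= sec &&
		    btree->ex[i].file_secno + btree->ex[i].length > sec)
			return btree->ex[i].disk_secno +
			       (sec - btree->ex[i].file_secno);

	return 0;                           /* hole / not mapped */
}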
|
D | map.c |
  153  if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=  in hpfs_map_fnode()
  154  (bp_internal(&fnode->btree) ? 12 : 8)) {  in hpfs_map_fnode()
  160  if (le16_to_cpu(fnode->btree.first_free) !=  in hpfs_map_fnode()
  161  8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {  in hpfs_map_fnode()
  208  if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=  in hpfs_map_anode()
  209  (bp_internal(&anode->btree) ? 60 : 40)) {  in hpfs_map_anode()
  213  if (le16_to_cpu(anode->btree.first_free) !=  in hpfs_map_anode()
  214  8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {  in hpfs_map_anode()
|
D | alloc.c |
  465  f->btree.n_free_nodes = 8;  in hpfs_alloc_fnode()
  466  f->btree.first_free = cpu_to_le16(8);  in hpfs_alloc_fnode()
  482  a->btree.n_free_nodes = 40;  in hpfs_alloc_anode()
  483  a->btree.n_used_nodes = 0;  in hpfs_alloc_anode()
  484  a->btree.first_free = cpu_to_le16(8);  in hpfs_alloc_anode()
|
D | hpfs.h |
  454  struct bplus_header btree;  /* b+ tree, 8 extents or 12 subtrees */  member
  496  struct bplus_header btree;  /* b+tree, 40 extents or 60 subtrees */  member
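The hpfs.h member comments, together with the checks quoted from map.c above, pin down the bplus_header geometry: an fnode-resident tree holds 8 external extents or 12 internal subtree pointers, an anode holds 40 or 60, internal entries are 8 bytes, external entries are 12 bytes, and first_free must land immediately after the used entries (which start 8 bytes into the header). A small sketch of that sanity check, assuming simplified field names in place of the on-disk structure:

#include <stdbool.h>
#include <stdint.h>

/* Simplified view of struct bplus_header (assumption: the real structure
 * packs flags into a bitfield and uses little-endian on-disk types). */
struct bplus_hdr {
	bool internal;        /* bp_internal(): subtree pointers vs. extents */
	uint8_t n_free_nodes;
	uint8_t n_used_nodes;
	uint16_t first_free;  /* byte offset of the first free entry slot    */
};

/* Mirror of the checks in hpfs_map_fnode()/hpfs_map_anode(): entries start
 * 8 bytes into the header, internal entries are 8 bytes, external 12. */
static bool bplus_header_ok(const struct bplus_hdr *h,
			    unsigned cap_internal, unsigned cap_external)
{
	unsigned capacity = h->internal ? cap_internal : cap_external;
	unsigned entry_size = h->internal ? 8 : 12;

	if ((unsigned)h->n_used_nodes + (unsigned)h->n_free_nodes != capacity)
		return false;
	return h->first_free == 8 + h->n_used_nodes * entry_size;
}

A caller would pass (12, 8) for an fnode-resident tree and (60, 40) for an anode, matching the constants used by hpfs_map_fnode() and hpfs_map_anode().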
|
D | namei.c |
  85   fnode->btree.n_free_nodes = 7;  in hpfs_mkdir()
  86   fnode->btree.n_used_nodes = 1;  in hpfs_mkdir()
  87   fnode->btree.first_free = cpu_to_le16(0x14);  in hpfs_mkdir()
|
D | file.c | 51 disk_secno = hpfs_bplus_lookup(inode->i_sb, inode, &fnode->btree, file_secno, bh); in hpfs_bmap()
|
D | ea.c | 43 hpfs_remove_btree(s, &anode->btree); in hpfs_ea_ext_remove()
|
/linux-4.1.27/fs/nilfs2/ |
D | btree.c |
  71   static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,  in nilfs_btree_get_new_block() argument
  74   struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;  in nilfs_btree_get_new_block()
  124  static int nilfs_btree_node_size(const struct nilfs_bmap *btree)  in nilfs_btree_node_size() argument
  126  return 1 << btree->b_inode->i_blkbits;  in nilfs_btree_node_size()
  129  static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree)  in nilfs_btree_nchildren_per_block() argument
  131  return btree->b_nchildren_per_block;  in nilfs_btree_nchildren_per_block()
  416  nilfs_btree_get_root(const struct nilfs_bmap *btree)  in nilfs_btree_get_root() argument
  418  return (struct nilfs_btree_node *)btree->b_u.u_data;  in nilfs_btree_get_root()
  433  static int nilfs_btree_height(const struct nilfs_bmap *btree)  in nilfs_btree_height() argument
  435  return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;  in nilfs_btree_height()
  [all …]
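Most of the nilfs2 matches are small geometry helpers: a node occupies one block, the fan-out is cached in b_nchildren_per_block, and the tree height is the root node's level plus one. The same arithmetic, as a hedged sketch with simplified types:

#include <stdint.h>

/* Simplified stand-ins: the real nilfs_bmap/nilfs_btree_node carry far
 * more state (buffer heads, the b_u union, an operations vector). */
struct btree_node_hdr { int level; };

struct bmap {
	unsigned blkbits;               /* b_inode->i_blkbits in the driver */
	int nchildren_per_block;        /* b_nchildren_per_block            */
	struct btree_node_hdr *root;    /* nilfs_btree_get_root()           */
};

static int node_size(const struct bmap *b)   { return 1 << b->blkbits; }
static int fan_out(const struct bmap *b)     { return b->nchildren_per_block; }
static int tree_height(const struct bmap *b) { return b->root->level + 1; }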
|
D | Makefile | 3 btnode.o bmap.o btree.o direct.o dat.o recovery.o \
|
/linux-4.1.27/drivers/md/bcache/ |
D | btree.h |
  116  struct btree {  struct
  128  struct btree *parent;  argument
  150  static inline bool btree_node_ ## flag(struct btree *b) \  argument
  153  static inline void set_btree_node_ ## flag(struct btree *b) \
  166  static inline struct btree_write *btree_current_write(struct btree *b)  in btree_current_write()
  171  static inline struct btree_write *btree_prev_write(struct btree *b)  in btree_prev_write()
  176  static inline struct bset *btree_bset_first(struct btree *b)  in btree_bset_first()
  181  static inline struct bset *btree_bset_last(struct btree *b)  in btree_bset_last()
  186  static inline unsigned bset_block_offset(struct btree *b, struct bset *i)  in bset_block_offset()
  225  static inline void rw_lock(bool w, struct btree *b, int level)  in rw_lock()
  [all …]
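Lines 150/153 of btree.h show token pasting being used to generate per-flag accessors (btree_node_<flag>() and set_btree_node_<flag>()). A stripped-down sketch of that technique, assuming a plain unsigned long flags word and simple bit operations rather than bcache's actual flag storage:

#include <stdbool.h>

/* Minimal stand-in for struct btree: only a flags word is needed here. */
struct btree {
	unsigned long flags;
};

enum btree_flags {
	BTREE_NODE_io_error,
	BTREE_NODE_dirty,
};

/* Token-pasting generator in the style of the accessors visible in btree.h;
 * the real code uses atomic bit operations, plain bit masks are assumed here. */
#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{									\
	return b->flags & (1UL << BTREE_NODE_ ## flag);			\
}									\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{									\
	b->flags |= 1UL << BTREE_NODE_ ## flag;				\
}

BTREE_FLAG(io_error)
BTREE_FLAG(dirty)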
|
D | btree.c |
  116  #define btree(fn, key, b, op, ...) \  macro
  120  struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
  140  struct btree *_b = (c)->root; \
  157  static inline struct bset *write_block(struct btree *b)  in write_block()
  162  static void bch_btree_init_next(struct btree *b)  in bch_btree_init_next()
  189  static uint64_t btree_csum_set(struct btree *b, struct bset *i)  in btree_csum_set()
  198  void bch_btree_node_read_done(struct btree *b)  in bch_btree_node_read_done()
  287  static void bch_btree_node_read(struct btree *b)  in bch_btree_node_read()
  325  static void btree_complete_write(struct btree *b, struct btree_write *w)  in btree_complete_write()
  342  struct btree *b = container_of(cl, struct btree, io);  in btree_node_write_unlock()
  [all …]
|
D | extents.c |
  127  struct btree *b = container_of(keys, struct btree, keys);  in bch_bkey_dump()
  167  struct btree *b = container_of(bk, struct btree, keys);  in bch_btree_ptr_invalid()
  171  static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)  in btree_ptr_bad_expensive()
  205  struct btree *b = container_of(bk, struct btree, keys);  in bch_btree_ptr_bad()
  230  struct btree *b = container_of(bk, struct btree, keys);  in bch_btree_ptr_insert_fixup()
  326  struct cache_set *c = container_of(b, struct btree, keys)->c;  in bch_extent_insert_fixup()
  499  struct btree *b = container_of(bk, struct btree, keys);  in bch_extent_invalid()
  503  static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,  in bch_extent_bad_expensive()
  535  struct btree *b = container_of(bk, struct btree, keys);  in bch_extent_bad()
  580  struct btree *b = container_of(bk, struct btree, keys);  in bch_extent_merge()
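The recurring idiom in extents.c is recovering the enclosing struct btree from a pointer to its embedded keys member via container_of(). Outside the kernel the same trick can be written with offsetof(); a minimal sketch under that assumption:

#include <stddef.h>

/* Userspace equivalent of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct btree_keys { int nsets; };

struct btree {
	struct btree_keys keys;   /* embedded member, as in bcache's struct btree */
	int level;
};

/* Given only a pointer to the embedded keys, recover the enclosing node,
 * mirroring container_of(bk, struct btree, keys) in extents.c. */
static struct btree *keys_to_btree(struct btree_keys *bk)
{
	return container_of(bk, struct btree, keys);
}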
|
D | debug.h |
  10   void bch_btree_verify(struct btree *);
  19   static inline void bch_btree_verify(struct btree *b) {}  in bch_btree_verify()
|
D | Makefile | 4 bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
|
D | bcache.h |
  219  struct btree;
  288  int (*cache_miss)(struct btree *, struct search *,
  621  struct btree *root;
  624  struct btree *verify_data;
|
D | Kconfig | 6 a btree for indexing and the layout is optimized for SSDs.
|
D | debug.c |
  30   void bch_btree_verify(struct btree *b)  in bch_btree_verify()
  32   struct btree *v = b->c->verify_data;  in bch_btree_verify()
|
D | sysfs.c |
  408  static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)  in bch_btree_bset_stats()
  448  struct btree *b;  in bch_root_usage()
  471  struct btree *b;  in bch_cache_size()
|
D | request.c |
  502   static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)  in cache_lookup_fn()
  776   static int cached_dev_cache_miss(struct btree *b, struct search *s,  in cached_dev_cache_miss()
  1037  static int flash_dev_cache_miss(struct btree *b, struct search *s,  in flash_dev_cache_miss()
|
D | writeback.c | 476 static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, in sectors_dirty_init_fn()
|
D | journal.c | 372 struct btree *b, *best; in btree_flush_write()
|
D | super.c | 1404 struct btree *b; in cache_set_flush()
|
/linux-4.1.27/fs/xfs/libxfs/ |
D | xfs_da_btree.c |
  509  struct xfs_da_node_entry *btree;  in xfs_da3_root_split() local
  544  btree = dp->d_ops->node_tree_p(oldroot);  in xfs_da3_root_split()
  545  size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);  in xfs_da3_root_split()
  604  btree = dp->d_ops->node_tree_p(node);  in xfs_da3_root_split()
  605  btree[0].hashval = cpu_to_be32(blk1->hashval);  in xfs_da3_root_split()
  606  btree[0].before = cpu_to_be32(blk1->blkno);  in xfs_da3_root_split()
  607  btree[1].hashval = cpu_to_be32(blk2->hashval);  in xfs_da3_root_split()
  608  btree[1].before = cpu_to_be32(blk2->blkno);  in xfs_da3_root_split()
  624  XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));  in xfs_da3_root_split()
  880  struct xfs_da_node_entry *btree;  in xfs_da3_node_add() local
  [all …]
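Lines 604-608 show the newly split root in xfs_da3_root_split() being populated with exactly two entries, one per child block, with hashval/before stored big-endian on disk. A hedged sketch of just that step, with simplified structures in place of the real XFS types and htonl() standing in for cpu_to_be32():

#include <stdint.h>
#include <arpa/inet.h>   /* htonl(): host-to-big-endian, stand-in for cpu_to_be32() */

/* Simplified entry: the on-disk xfs_da_node_entry stores both fields as __be32. */
struct da_node_entry {
	uint32_t hashval;
	uint32_t before;    /* block number of the child */
};

struct child_blk {
	uint32_t hashval;   /* highest hash in the child block */
	uint32_t blkno;
};

/* Fill the two entries of a freshly split root, as done around lines
 * 605-608 of xfs_da3_root_split(). */
static void root_fill(struct da_node_entry *btree,
		      const struct child_blk *blk1,
		      const struct child_blk *blk2)
{
	btree[0].hashval = htonl(blk1->hashval);
	btree[0].before  = htonl(blk1->blkno);
	btree[1].hashval = htonl(blk2->hashval);
	btree[1].before  = htonl(blk2->blkno);
}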
|
D | xfs_sb.c |
  714  uint64_t btree = 0;  in xfs_initialize_perag_data() local
  735  btree += pag->pagf_btreeblks;  in xfs_initialize_perag_data()
  743  sbp->sb_fdblocks = bfree + bfreelst + btree;  in xfs_initialize_perag_data()
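xfs_initialize_perag_data() recomputes the superblock free-space count by summing per-AG counters; the btree local accumulates the blocks consumed by the allocation groups' own btrees. A minimal sketch of that accumulation, assuming a plain array of per-AG records with hypothetical field names modeled on pagf_freeblks/pagf_flcount/pagf_btreeblks:

#include <stdint.h>
#include <stddef.h>

/* Simplified per-allocation-group counters (hypothetical field names). */
struct perag {
	uint64_t freeblks;    /* free extents             */
	uint64_t flcount;     /* free-list blocks         */
	uint64_t btreeblks;   /* blocks in the AG's btrees */
};

/* Recompute the free-block count from every AG, in the spirit of
 * xfs_initialize_perag_data(): sb_fdblocks = bfree + bfreelst + btree. */
static uint64_t recount_fdblocks(const struct perag *pags, size_t agcount)
{
	uint64_t bfree = 0, bfreelst = 0, btree = 0;
	size_t agno;

	for (agno = 0; agno < agcount; agno++) {
		bfree    += pags[agno].freeblks;
		bfreelst += pags[agno].flcount;
		btree    += pags[agno].btreeblks;
	}
	return bfree + bfreelst + btree;
}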
|
D | xfs_attr_leaf.c |
  959   struct xfs_da_node_entry *btree;  in xfs_attr3_leaf_to_node() local
  1000  btree = dp->d_ops->node_tree_p(node);  in xfs_attr3_leaf_to_node()
  1007  btree[0].hashval = entries[icleafhdr.count - 1].hashval;  in xfs_attr3_leaf_to_node()
  1008  btree[0].before = cpu_to_be32(blkno);  in xfs_attr3_leaf_to_node()
|
/linux-4.1.27/drivers/md/persistent-data/ |
D | Makefile |
  10  dm-btree.o \
  11  dm-btree-remove.o \
  12  dm-btree-spine.o
|
/linux-4.1.27/Documentation/device-mapper/ |
D | persistent-data.txt |
  10  - Another btree-based caching target posted to dm-devel
  68  dm-btree.[hc]
  69  dm-btree-remove.c
  70  dm-btree-spine.c
  71  dm-btree-internal.h
  73  Currently there is only one data structure, a hierarchical btree.
  77  The btree is 'hierarchical' in that you can define it to be composed
  79  thin-provisioning target uses a btree with two levels of nesting.
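persistent-data.txt describes the dm btree as 'hierarchical': a value stored at one level can itself be the root of another btree, and the thin-provisioning target nests two levels (device id, then block). A purely conceptual sketch of such a nested lookup; the single-level lookup is passed in as a hypothetical callback and is not the real dm-btree API:

#include <stdint.h>

/* Hypothetical single-level lookup: returns 0 on success and stores the
 * value found under 'key' in the tree rooted at block 'root'. */
typedef int (*lookup_fn)(uint64_t root, uint64_t key, uint64_t *value);

/* Two-level ("nested") lookup in the sense described above: the value
 * found at the top level is the root of a per-device mapping tree. */
static int nested_lookup(lookup_fn lookup, uint64_t top_root,
			 uint64_t dev_id, uint64_t block,
			 uint64_t *data_block)
{
	uint64_t dev_root;
	int r;

	r = lookup(top_root, dev_id, &dev_root);     /* level 1: device id */
	if (r)
		return r;
	return lookup(dev_root, block, data_block);  /* level 2: block no. */
}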
|
D | thin-provisioning.txt |
  349  Reserve a copy of the data mapping btree for use by userland.
  356  Release a previously reserved copy of the data mapping btree.
|
/linux-4.1.27/include/trace/events/ |
D | bcache.h |
  63   TP_PROTO(struct btree *b),
  236  TP_PROTO(struct btree *b),
  241  TP_PROTO(struct btree *b),
  260  TP_PROTO(struct btree *b),
  270  TP_PROTO(struct btree *b),
  310  TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
  343  TP_PROTO(struct btree *b, unsigned keys),
  360  TP_PROTO(struct btree *b, unsigned keys),
  365  TP_PROTO(struct btree *b, unsigned keys),
  370  TP_PROTO(struct btree *b),
|
/linux-4.1.27/fs/befs/ |
D | ChangeLog |
  27   * Did the string comparison really right this time (btree.c) [WD]
  30   a pointer value. (btree.c) [WD]
  38   keys within btree nodes, rather than the linear search we were using
  39   before. (btree.c) [Sergey S. Kostyliov <rathamahata@php4.ru>]
  56   (btree.c) [WD]
  105  * Removed notion of btree handle from btree.c. It was unnecessary, as the
  128  (btree.c) [WD]
  133  seekleaf() in btree.c [WD]
  148  (datastream.c, btree.c super.c inode.c) [WD]
  253  * Fix bug with reading an empty directory. (btree.c and dir.c)
  [all …]
|
D | Makefile | 7 befs-objs := datastream.o btree.o super.o inode.o debug.o io.o linuxvfs.o
|
/linux-4.1.27/Documentation/ |
D | bcache.txt |
  10   in erase block sized buckets, and it uses a hybrid btree/log to track cached
  182  the way cache coherency is handled for cache misses. If a btree node is full,
  187  cause the btree node to be split, and you need almost no write traffic for
  188  this to not show up enough to be noticeable (especially since bcache's btree
  313  Average data per key in the btree.
  322  Amount of memory currently used by the btree cache
  357  Percentage of the root btree node in use. If this gets too high the node
  365  Depth of the btree (A single node btree has depth 0).
  375  duration: garbage collection, btree read, btree node sorts and btree splits.
  381  Total nodes in the btree.
  [all …]
|
/linux-4.1.27/fs/xfs/ |
D | xfs_attr_inactive.c |
  220  struct xfs_da_node_entry *btree;  in xfs_attr3_node_inactive() local
  238  btree = dp->d_ops->node_tree_p(node);  in xfs_attr3_node_inactive()
  239  child_fsb = be32_to_cpu(btree[0].before);  in xfs_attr3_node_inactive()
  305  child_fsb = be32_to_cpu(btree[i + 1].before);  in xfs_attr3_node_inactive()
|
D | xfs_attr_list.c |
  226  struct xfs_da_node_entry *btree;  in xfs_attr_node_list() local
  316  btree = dp->d_ops->node_tree_p(node);  in xfs_attr_node_list()
  317  for (i = 0; i < nodehdr.count; btree++, i++) {  in xfs_attr_node_list()
  319  <= be32_to_cpu(btree->hashval)) {  in xfs_attr_node_list()
  320  cursor->blkno = be32_to_cpu(btree->before);  in xfs_attr_node_list()
  322  btree);  in xfs_attr_node_list()
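The loop at lines 317-322 of xfs_attr_node_list() walks a node's entries in hash order and descends into the first child whose hashval is >= the cursor's hash, which is how a listing resumes at the correct leaf. A small sketch of that selection, with plain host-endian fields instead of the on-disk be32 values:

#include <stdbool.h>
#include <stdint.h>

/* Simplified node entry: the on-disk xfs_da_node_entry stores both fields
 * as __be32; plain host-endian values are used here. */
struct node_entry {
	uint32_t hashval;   /* highest hash reachable through 'before' */
	uint32_t before;    /* child block number                      */
};

/* Choose the child block to descend into when resuming a listing at
 * cursor_hash, mirroring the scan in xfs_attr_node_list(). */
static bool pick_child(const struct node_entry *btree, unsigned count,
		       uint32_t cursor_hash, uint32_t *child_blkno)
{
	unsigned i;

	for (i = 0; i < count; i++) {
		if (cursor_hash <= btree[i].hashval) {
			*child_blkno = btree[i].before;
			return true;
		}
	}
	return false;   /* cursor is past every entry in this node */
}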
|
D | xfs_trace.h |
  165  struct xfs_da_node_entry *btree),
  166  TP_ARGS(ctx, btree),
  193  __entry->bt_hashval = be32_to_cpu(btree->hashval);
  194  __entry->bt_before = be32_to_cpu(btree->before);
|
/linux-4.1.27/fs/hfs/ |
D | Makefile | 7 hfs-objs := bitmap.o bfind.o bnode.o brec.o btree.o \
|
/linux-4.1.27/fs/hfsplus/ |
D | Makefile | 7 hfsplus-objs := super.o options.o inode.o ioctl.o extents.o catalog.o dir.o btree.o \
|
/linux-4.1.27/Documentation/ABI/testing/ |
D | sysfs-block-bcache |
  134  For a cache, height of the btree excluding leaf nodes (i.e. a
  141  Number of btree buckets/nodes that are currently cached in
  156  For a cache, sum of all btree writes in human readable units.
|
/linux-4.1.27/Documentation/filesystems/ |
D | xfs-self-describing-metadata.txt |
  49   pointers in a btree end up with loops in them) are the key to understanding what
  104  determine the scope of the corruption. For example, if we have a extent btree
  114  freespace btree blocks are owned by an allocation group. Hence the size and
  117  freespace btree block written to the wrong AG).
  129  when the free space btree block that contains the block was last written
  207  - short btree blocks have a 32 bit owner (ag number) and a 32 bit block
|
D | hfs.txt | 82 in btree routines derived from Brad Boyer's hfsplus driver.
|
D | xfs-delayed-logging-design.txt | 54 keeps relogging the inode and btree buffers as they get modified in each
|
/linux-4.1.27/lib/ |
D | Makefile | 58 obj-$(CONFIG_BTREE) += btree.o
|