node_size         452 drivers/media/platform/sti/bdisp/bdisp-hw.c 	unsigned int i, node_size = sizeof(struct bdisp_node);
node_size         457 drivers/media/platform/sti/bdisp/bdisp-hw.c 	base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
node_size         464 drivers/media/platform/sti/bdisp/bdisp-hw.c 	memset(base, 0, node_size * MAX_NB_NODE);
node_size         471 drivers/media/platform/sti/bdisp/bdisp-hw.c 		base += node_size;
node_size         472 drivers/media/platform/sti/bdisp/bdisp-hw.c 		paddr += node_size;
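
The bdisp hits above show one contiguous DMA buffer of node_size * MAX_NB_NODE bytes being allocated, zeroed, and then walked node by node, with both the CPU pointer and the DMA address advanced by node_size on each step. Below is a minimal userspace sketch of that stride arithmetic only; malloc()/memset() stand in for dma_alloc_attrs(), and struct fake_node, the MAX_NB_NODE value, and the fake DMA address are illustrative placeholders, not the driver's definitions.

/*
 * Illustrative sketch: one contiguous buffer sized node_size * MAX_NB_NODE,
 * zeroed, then walked with a fixed node_size stride for both the CPU and
 * "DMA" views.  All names and values here are placeholders.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_NB_NODE 4                   /* placeholder; the driver defines its own */

struct fake_node {                      /* stand-in for struct bdisp_node */
	uint32_t regs[32];
};

int main(void)
{
	unsigned int i, node_size = sizeof(struct fake_node);
	uintptr_t paddr = 0x10000000;   /* pretend bus/DMA address */
	uint8_t *base, *cursor;

	base = malloc((size_t)node_size * MAX_NB_NODE);
	if (!base)
		return 1;
	memset(base, 0, (size_t)node_size * MAX_NB_NODE);

	cursor = base;
	for (i = 0; i < MAX_NB_NODE; i++) {
		printf("node %u: cpu %p, dma 0x%lx\n",
		       i, (void *)cursor, (unsigned long)paddr);
		cursor += node_size;    /* both views advance by node_size */
		paddr += node_size;
	}
	free(base);
	return 0;
}
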
node_size         714 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c 	size_t node_size;
node_size         720 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c 	node_size = chain->cell_size;
node_size         732 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c 		dma_free_coherent(&pdev->dev, node_size, node,
node_size         219 fs/befs/befs_fs_types.h 	fs32 node_size;
node_size         229 fs/befs/befs_fs_types.h 	u32 node_size;
node_size         152 fs/befs/btree.c 	sup->node_size = fs32_to_cpu(sb, od_sup->node_size);
node_size         233 fs/befs/debug.c 	befs_debug(sb, "  node_size %u", fs32_to_cpu(sb, super->node_size));
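
In befs the value is kept twice: as a raw on-disk fs32 in the B+tree superblock and as a native-endian u32 in the in-core copy, converted once with fs32_to_cpu() when the tree superblock is read (and again on the fly for the debug print). A rough userspace sketch of that conversion follows; le32toh()/be32toh() stand in for fs32_to_cpu(), which in the kernel chooses the swap from the byte order recorded by the filesystem, and the struct names and the 1024-byte value are made up for illustration.

/*
 * Sketch of the on-disk vs. in-core split: node_size is stored in the disk's
 * byte order and converted once into a native u32.  Struct names and values
 * are illustrative, not the befs definitions.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct ondisk_btree_super {             /* loosely modelled on the disk superblock */
	uint32_t node_size;             /* raw disk endianness */
};

struct incore_btree_super {             /* loosely modelled on the in-core copy */
	uint32_t node_size;             /* native endianness */
};

int main(void)
{
	struct ondisk_btree_super od = { .node_size = htole32(1024) };
	struct incore_btree_super sup;
	int disk_is_le = 1;             /* befs records the fs byte order itself */

	sup.node_size = disk_is_le ? le32toh(od.node_size)
				   : be32toh(od.node_size);
	printf("node_size %u\n", sup.node_size);
	return 0;
}
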
node_size         146 fs/hfs/bnode.c 	off = node->tree->node_size - 2;
node_size         277 fs/hfs/bnode.c 	off = (loff_t)cnid * tree->node_size;
node_size         364 fs/hfs/bnode.c 	rec_off = tree->node_size - 2;
node_size         372 fs/hfs/bnode.c 		    next_off > tree->node_size ||
node_size         429 fs/hfs/bnode.c 	       min((int)PAGE_SIZE, (int)tree->node_size));
node_size          24 fs/hfs/brec.c  	dataoff = node->tree->node_size - (rec + 2) * 2;
node_size          45 fs/hfs/brec.c  		recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2);
node_size          94 fs/hfs/brec.c  	end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
node_size         117 fs/hfs/brec.c  	idx_rec_off = tree->node_size - (rec + 1) * 2;
node_size         186 fs/hfs/brec.c  	rec_off = tree->node_size - (fd->record + 2) * 2;
node_size         187 fs/hfs/brec.c  	end_off = tree->node_size - (node->num_recs + 1) * 2;
node_size         264 fs/hfs/brec.c  	size = tree->node_size / 2 - node->num_recs * 2 - 14;
node_size         265 fs/hfs/brec.c  	old_rec_off = tree->node_size - 4;
node_size         300 fs/hfs/brec.c  	new_rec_off = tree->node_size - 2;
node_size         384 fs/hfs/brec.c  	rec_off = tree->node_size - (rec + 2) * 2;
node_size         385 fs/hfs/brec.c  	end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
node_size         400 fs/hfs/brec.c  			rec_off = tree->node_size - (rec + 2) * 2;
node_size         401 fs/hfs/brec.c  			end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
node_size         499 fs/hfs/brec.c  	rec = tree->node_size - 2;
node_size          91 fs/hfs/btree.c 	tree->node_size = be16_to_cpu(head->node_size);
node_size          95 fs/hfs/btree.c 	size = tree->node_size;
node_size         120 fs/hfs/btree.c 	tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
node_size         208 fs/hfs/btree.c 	hfs_bnode_clear(node, 0, tree->node_size);
node_size         217 fs/hfs/btree.c 	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
node_size         218 fs/hfs/btree.c 	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);
node_size          31 fs/hfs/btree.h 	unsigned int node_size;
node_size         146 fs/hfs/btree.h 	__be16 node_size;	/* (F) The number of bytes in a node (=512) */
node_size          96 fs/hfs/inode.c 	if (tree->node_size >= PAGE_SIZE) {
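
The expressions repeated throughout fs/hfs/brec.c and btree.c all follow from the HFS B-tree node layout: a 14-byte node descriptor at the front, record data growing upward from offset 14, and a table of 16-bit record offsets growing downward from the end of the node. The slot for record rec therefore sits at node_size - (rec + 1) * 2, and the slot past the last record marks the start of free space, which is why btree.c writes 14 (the descriptor size) into the last two bytes of a fresh node. The sketch below only evaluates that arithmetic; the 512-byte node size (the classic HFS default noted in btree.h) and the record count are example values.

/*
 * Sketch of the offset arithmetic from fs/hfs/brec.c: the slot holding the
 * start offset of record `rec` lives at node_size - (rec + 1) * 2, counted
 * from the start of the node, and record 0 always begins right after the
 * 14-byte node descriptor.
 */
#include <stdio.h>

#define NODE_DESC_SIZE 14               /* size of the on-disk node descriptor */

static unsigned int rec_slot(unsigned int node_size, unsigned int rec)
{
	return node_size - (rec + 1) * 2;
}

int main(void)
{
	unsigned int node_size = 512;   /* example; classic HFS node size */
	unsigned int num_recs = 3;      /* hypothetical record count */
	unsigned int rec;

	for (rec = 0; rec < num_recs; rec++)
		printf("offset slot for record %u: %u\n",
		       rec, rec_slot(node_size, rec));

	/* The slot past the last record tracks the start of free space. */
	printf("free-space slot: %u\n", rec_slot(node_size, num_recs));

	/* Record 0 always begins right after the node descriptor. */
	printf("record 0 starts at %u\n", NODE_DESC_SIZE);
	return 0;
}
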
node_size         310 fs/hfsplus/bnode.c 	off = node->tree->node_size - 2;
node_size         532 fs/hfsplus/bnode.c 	rec_off = tree->node_size - 2;
node_size         540 fs/hfsplus/bnode.c 		    next_off > tree->node_size ||
node_size         597 fs/hfsplus/bnode.c 	       min_t(int, PAGE_SIZE, tree->node_size));
node_size         644 fs/hfsplus/bnode.c 				hfs_bnode_clear(node, 0, tree->node_size);
node_size          25 fs/hfsplus/brec.c 	dataoff = node->tree->node_size - (rec + 2) * 2;
node_size          45 fs/hfsplus/brec.c 			node->tree->node_size - (rec + 1) * 2);
node_size          48 fs/hfsplus/brec.c 		if (recoff > node->tree->node_size - 2) {
node_size          92 fs/hfsplus/brec.c 	end_rec_off = tree->node_size - (node->num_recs + 1) * 2;
node_size         117 fs/hfsplus/brec.c 	idx_rec_off = tree->node_size - (rec + 1) * 2;
node_size         188 fs/hfsplus/brec.c 	rec_off = tree->node_size - (fd->record + 2) * 2;
node_size         189 fs/hfsplus/brec.c 	end_off = tree->node_size - (node->num_recs + 1) * 2;
node_size         268 fs/hfsplus/brec.c 	size = tree->node_size / 2 - node->num_recs * 2 - 14;
node_size         269 fs/hfsplus/brec.c 	old_rec_off = tree->node_size - 4;
node_size         304 fs/hfsplus/brec.c 	new_rec_off = tree->node_size - 2;
node_size         389 fs/hfsplus/brec.c 	rec_off = tree->node_size - (rec + 2) * 2;
node_size         390 fs/hfsplus/brec.c 	end_rec_off = tree->node_size - (parent->num_recs + 1) * 2;
node_size         405 fs/hfsplus/brec.c 			rec_off = tree->node_size - (rec + 2) * 2;
node_size         406 fs/hfsplus/brec.c 			end_rec_off = tree->node_size -
node_size         503 fs/hfsplus/brec.c 	rec = tree->node_size - 2;
node_size          75 fs/hfsplus/btree.c u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
node_size          78 fs/hfsplus/btree.c 	u32 mod = max(node_size, block_size);
node_size         102 fs/hfsplus/btree.c 		if (clump_size < (8 * node_size))
node_size         103 fs/hfsplus/btree.c 			clump_size = 8 * node_size;
node_size         175 fs/hfsplus/btree.c 	tree->node_size = be16_to_cpu(head->node_size);
node_size         231 fs/hfsplus/btree.c 	size = tree->node_size;
node_size         240 fs/hfsplus/btree.c 		(tree->node_size + PAGE_SIZE - 1) >>
node_size         330 fs/hfsplus/btree.c 	hfs_bnode_clear(node, 0, tree->node_size);
node_size         339 fs/hfsplus/btree.c 	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
node_size         340 fs/hfsplus/btree.c 	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);
node_size          87 fs/hfsplus/hfsplus_fs.h 	unsigned int node_size;
node_size         394 fs/hfsplus/hfsplus_fs.h u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size, u64 sectors,
node_size         173 fs/hfsplus/hfsplus_raw.h 	__be16 node_size;
node_size          91 fs/hfsplus/inode.c 	if (tree->node_size >= PAGE_SIZE) {
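
The hfsplus hits mirror the hfs record-offset arithmetic; the node_size-specific pieces visible here are the pages-per-bnode rounding in btree.c and the floor that hfsplus_calc_btree_clump_size() puts under the clump size (never smaller than eight nodes). The sketch below reproduces only those two calculations; PAGE_SIZE is pinned to 4 KiB for the example, the input values are invented, and the rest of the clump-size rounding (the max(node_size, block_size) modulus) is not reproduced.

/*
 * Sketch of two node_size calculations visible above: how many pages back
 * one bnode, and the 8-node minimum clump size.  PAGE_SIZE is fixed at 4 KiB
 * here; the kernel uses the architecture's value.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

int main(void)
{
	uint32_t node_size = 8192;      /* example B-tree node size */
	uint32_t clump_size = 16384;    /* hypothetical starting clump */
	uint32_t pages_per_bnode;

	/* Same rounding as (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT. */
	pages_per_bnode = (node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	printf("pages per bnode: %u\n", pages_per_bnode);

	/* Clump floor from hfsplus_calc_btree_clump_size(): at least 8 nodes. */
	if (clump_size < 8 * node_size)
		clump_size = 8 * node_size;
	printf("clump size: %u\n", clump_size);
	return 0;
}
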
node_size          55 fs/hfsplus/xattr.c 					char *buf, u16 node_size)
node_size          68 fs/hfsplus/xattr.c 		clump_size, node_size);
node_size          71 fs/hfsplus/xattr.c 	rec_offsets = (__be16 *)(buf + node_size);
node_size          80 fs/hfsplus/xattr.c 	head->node_size = cpu_to_be16(node_size);
node_size          82 fs/hfsplus/xattr.c 	do_div(tmp, node_size);
node_size          93 fs/hfsplus/xattr.c 	hdr_node_map_rec_bits = 8 * (node_size - offset - (4 * sizeof(u16)));
node_size          99 fs/hfsplus/xattr.c 		map_node_bits = 8 * (node_size - sizeof(struct hfs_bnode_desc) -
node_size         128 fs/hfsplus/xattr.c 	u16 node_size = HFSPLUS_ATTR_TREE_NODE_SIZE;
node_size         180 fs/hfsplus/xattr.c 						    node_size,
node_size         205 fs/hfsplus/xattr.c 	buf = kzalloc(node_size, GFP_NOFS);
node_size         212 fs/hfsplus/xattr.c 	hfsplus_init_header_node(attr_file, clump_size, buf, node_size);
node_size         218 fs/hfsplus/xattr.c 	for (; written < node_size; index++, written += PAGE_SIZE) {
node_size         229 fs/hfsplus/xattr.c 			min_t(size_t, PAGE_SIZE, node_size - written));
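
The xattr.c hits come from hfsplus_init_header_node() and its caller: the header node of a freshly created attributes B-tree is built in a node_size-sized kzalloc'd buffer, then flushed one page at a time, with the final chunk clamped to node_size - written. The sketch below mimics only that chunked-copy loop; the 8192-byte node size is an example stand-in for HFSPLUS_ATTR_TREE_NODE_SIZE, and a flat memcpy() replaces the page-cache writes.

/*
 * Sketch of the page-by-page flush loop: copy PAGE_SIZE bytes per iteration,
 * clamping the last chunk to whatever remains of the node.  The destination
 * array stands in for the page-cache pages the kernel writes into.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

int main(void)
{
	uint16_t node_size = 8192;      /* example stand-in for HFSPLUS_ATTR_TREE_NODE_SIZE */
	uint8_t buf[8192] = { 0 };      /* stand-in for the kzalloc'd header node */
	uint8_t dst[8192];
	size_t written;
	unsigned long index;

	for (written = 0, index = 0; written < node_size;
	     index++, written += PAGE_SIZE) {
		size_t chunk = PAGE_SIZE < node_size - written ?
			       PAGE_SIZE : node_size - written;

		memcpy(dst + written, buf + written, chunk);
		printf("page %lu: copied %zu bytes\n", index, chunk);
	}
	return 0;
}
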