
Searched refs:rb_entry (Results 1 – 192 of 192) sorted by relevance
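
Note: every hit below is the same idiom. rb_entry() is container_of() for red-black trees: it converts a pointer to an embedded struct rb_node into a pointer to the structure containing it (see the macro definition under include/linux/rbtree.h below). A minimal sketch of the pattern, using a hypothetical struct mytype that does not come from any file in this listing:

  struct mytype {
          struct rb_node node;    /* embedded rbtree linkage */
          int key;
  };

  /* Descend from the root; rb_entry() maps each rb_node to its container. */
  struct mytype *my_search(struct rb_root *root, int key)
  {
          struct rb_node *n = root->rb_node;

          while (n) {
                  struct mytype *data = rb_entry(n, struct mytype, node);

                  if (key < data->key)
                          n = n->rb_left;
                  else if (key > data->key)
                          n = n->rb_right;
                  else
                          return data;
          }
          return NULL;
  }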

/linux-4.1.27/tools/perf/tests/
hists_output.c
106 he = rb_entry(node, struct hist_entry, rb_node); in del_hist_entries()
164 he = rb_entry(node, struct hist_entry, rb_node); in test1()
170 he = rb_entry(node, struct hist_entry, rb_node); in test1()
176 he = rb_entry(node, struct hist_entry, rb_node); in test1()
182 he = rb_entry(node, struct hist_entry, rb_node); in test1()
188 he = rb_entry(node, struct hist_entry, rb_node); in test1()
194 he = rb_entry(node, struct hist_entry, rb_node); in test1()
200 he = rb_entry(node, struct hist_entry, rb_node); in test1()
206 he = rb_entry(node, struct hist_entry, rb_node); in test1()
212 he = rb_entry(node, struct hist_entry, rb_node); in test1()
[all …]
vmlinux-kallsyms.c
114 sym = rb_entry(nd, struct symbol, rb_node); in test__vmlinux_matches_kallsyms()
158 struct symbol *next = rb_entry(nnd, struct symbol, rb_node); in test__vmlinux_matches_kallsyms()
188 struct map *pos = rb_entry(nd, struct map, rb_node), *pair; in test__vmlinux_matches_kallsyms()
208 struct map *pos = rb_entry(nd, struct map, rb_node), *pair; in test__vmlinux_matches_kallsyms()
233 struct map *pos = rb_entry(nd, struct map, rb_node); in test__vmlinux_matches_kallsyms()
hists_common.c
169 he = rb_entry(node, struct hist_entry, rb_node_in); in print_hists_in()
196 he = rb_entry(node, struct hist_entry, rb_node); in print_hists_out()
hists_cumulate.c
140 he = rb_entry(node, struct hist_entry, rb_node); in del_hist_entries()
201 node && (he = rb_entry(node, struct hist_entry, rb_node)); in do_test()
220 cnode = rb_entry(rb_first(root), struct callchain_node, rb_node); in do_test()
hists_link.c
165 he = rb_entry(node, struct hist_entry, rb_node_in); in __validate_match()
217 he = rb_entry(node, struct hist_entry, rb_node_in); in __validate_link()
/linux-4.1.27/include/linux/
interval_tree_generic.h
49 subtree_last = rb_entry(node->ITRB.rb_left, \
55 subtree_last = rb_entry(node->ITRB.rb_right, \
76 parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
113 ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
132 node = rb_entry(node->ITRB.rb_right, \
149 node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
169 ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
181 node = rb_entry(rb, ITSTRUCT, ITRB); \
rbtree_augmented.h
69 rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
80 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
81 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
87 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
88 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
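
Note: the rbtree_augmented.h hits above are the template bodies behind RB_DECLARE_CALLBACKS; the generated propagate/copy/rotate callbacks all use rb_entry() to reach the containing structure. A sketch of how lib/rbtree_test.c (listed under /linux-4.1.27/lib/ below) wires this up; the helper mirrors that file, but treat the exact macro signature as an assumption for this kernel version:

  struct test_node {
          u32 key;
          struct rb_node rb;
          u32 val;          /* per-node value */
          u32 augmented;    /* max of val across this node's subtree */
  };

  /* Recompute the subtree maximum from a node's value and its children's
   * cached subtree maxima; rb_entry() reaches each child's container. */
  static u32 augment_recompute(struct test_node *node)
  {
          u32 max = node->val, child_augmented;

          if (node->rb.rb_left) {
                  child_augmented = rb_entry(node->rb.rb_left,
                                             struct test_node, rb)->augmented;
                  if (max < child_augmented)
                          max = child_augmented;
          }
          if (node->rb.rb_right) {
                  child_augmented = rb_entry(node->rb.rb_right,
                                             struct test_node, rb)->augmented;
                  if (max < child_augmented)
                          max = child_augmented;
          }
          return max;
  }

  RB_DECLARE_CALLBACKS(static, augment_callbacks, struct test_node, rb,
                       u32, augmented, augment_recompute)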
rbtree.h
50 #define rb_entry(ptr, type, member) container_of(ptr, type, member) macro
90 ____ptr ? rb_entry(____ptr, type, member) : NULL; \
elevator.h
202 #define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
perf_event.h
455 struct list_head rb_entry; member
/linux-4.1.27/fs/jffs2/
nodelist.h
334 return rb_entry(node, struct jffs2_node_frag, rb); in frag_first()
344 return rb_entry(node, struct jffs2_node_frag, rb); in frag_last()
347 #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
348 #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
349 #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
350 #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
351 #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
354 #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
355 #define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
356 #define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
[all …]
nodelist.c
131 base = rb_entry(parent, struct jffs2_node_frag, rb); in jffs2_fragtree_insert()
537 frag = rb_entry(next, struct jffs2_node_frag, rb); in jffs2_lookup_node_frag()
readinode.c
185 tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb); in jffs2_lookup_tn()
344 insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); in jffs2_add_tn_to_tree()
432 this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); in ver_insert()
/linux-4.1.27/fs/btrfs/
extent_map.c
102 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
115 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
122 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
125 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
151 entry = rb_entry(n, struct extent_map, rb_node); in __tree_search()
167 prev_entry = rb_entry(prev, struct extent_map, rb_node); in __tree_search()
174 prev_entry = rb_entry(prev, struct extent_map, rb_node); in __tree_search()
177 prev_entry = rb_entry(prev, struct extent_map, rb_node); in __tree_search()
233 merge = rb_entry(rb, struct extent_map, rb_node); in try_merge_map()
252 merge = rb_entry(rb, struct extent_map, rb_node); in try_merge_map()
[all …]
ordered-data.c
50 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); in tree_insert()
87 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); in __tree_search()
105 prev_entry = rb_entry(test, struct btrfs_ordered_extent, in __tree_search()
113 prev_entry = rb_entry(prev, struct btrfs_ordered_extent, in __tree_search()
119 prev_entry = rb_entry(test, struct btrfs_ordered_extent, in __tree_search()
160 entry = rb_entry(tree->last, struct btrfs_ordered_extent, in tree_search()
329 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); in btrfs_dec_test_first_ordered_pending()
401 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); in btrfs_dec_test_ordered_pending()
450 ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node); in btrfs_get_logged_extents()
796 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); in btrfs_lookup_ordered_extent()
[all …]
delayed-ref.c
148 ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); in tree_insert()
151 entry = rb_entry(parent_node, struct btrfs_delayed_ref_node, in tree_insert()
178 ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node); in htree_insert()
182 entry = rb_entry(parent_node, struct btrfs_delayed_ref_head, in htree_insert()
214 entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node); in find_ref_head()
228 entry = rb_entry(n, struct btrfs_delayed_ref_head, in find_ref_head()
293 next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); in merge_ref()
361 ref = rb_entry(node, struct btrfs_delayed_ref_node, in btrfs_merge_delayed_refs()
433 head = rb_entry(node, struct btrfs_delayed_ref_head, in btrfs_select_ref_head()
free-space-cache.c
649 e = rb_entry(n, struct btrfs_free_space, offset_index); in merge_space_tree()
935 e = rb_entry(node, struct btrfs_free_space, offset_index); in write_cache_extent_entries()
1446 info = rb_entry(parent, struct btrfs_free_space, offset_index); in tree_insert_offset()
1509 entry = rb_entry(n, struct btrfs_free_space, offset_index); in tree_search_offset()
1533 entry = rb_entry(n, struct btrfs_free_space, offset_index); in tree_search_offset()
1547 prev = rb_entry(n, struct btrfs_free_space, in tree_search_offset()
1565 entry = rb_entry(n, struct btrfs_free_space, in tree_search_offset()
1579 prev = rb_entry(n, struct btrfs_free_space, in tree_search_offset()
1606 entry = rb_entry(n, struct btrfs_free_space, offset_index); in tree_search_offset()
1789 entry = rb_entry(node, struct btrfs_free_space, offset_index); in find_free_space()
[all …]
ulist.c
124 u = rb_entry(n, struct ulist_node, rb_node); in ulist_rbtree_search()
143 cur = rb_entry(parent, struct ulist_node, rb_node); in ulist_rbtree_insert()
extent_io.c
257 entry = rb_entry(parent, struct tree_entry, rb_node); in tree_insert()
288 entry = rb_entry(prev, struct tree_entry, rb_node); in __etree_search()
308 prev_entry = rb_entry(prev, struct tree_entry, rb_node); in __etree_search()
315 prev_entry = rb_entry(prev, struct tree_entry, rb_node); in __etree_search()
318 prev_entry = rb_entry(prev, struct tree_entry, rb_node); in __etree_search()
374 other = rb_entry(other_node, struct extent_state, rb_node); in merge_state()
386 other = rb_entry(other_node, struct extent_state, rb_node); in merge_state()
444 found = rb_entry(node, struct extent_state, rb_node); in insert_state()
500 return rb_entry(next, struct extent_state, rb_node); in next_state()
634 state = rb_entry(node, struct extent_state, rb_node); in clear_extent_bit()
[all …]
relocation.c
298 entry = rb_entry(parent, struct tree_entry, rb_node); in tree_insert()
319 entry = rb_entry(n, struct tree_entry, rb_node); in tree_search()
335 struct backref_node *bnode = rb_entry(rb_node, struct backref_node, in backref_tree_panic()
562 node = rb_entry(rb_node, struct mapping_node, rb_node); in find_reloc_root()
870 upper = rb_entry(rb_node, struct backref_node,
987 upper = rb_entry(rb_node, struct backref_node,
1204 node = rb_entry(rb_node, struct backref_node, rb_node);
1215 node = rb_entry(rb_node, struct backref_node,
1318 node = rb_entry(rb_node, struct mapping_node, rb_node);
1347 node = rb_entry(rb_node, struct mapping_node, rb_node);
[all …]
delayed-inode.c
352 delayed_item = rb_entry(node, struct btrfs_delayed_item, in __btrfs_lookup_delayed_item()
370 *prev = rb_entry(node, struct btrfs_delayed_item, in __btrfs_lookup_delayed_item()
382 *next = rb_entry(node, struct btrfs_delayed_item, in __btrfs_lookup_delayed_item()
422 item = rb_entry(parent_node, struct btrfs_delayed_item, in __btrfs_add_delayed_item()
511 item = rb_entry(p, struct btrfs_delayed_item, rb_node); in __btrfs_first_delayed_insertion_item()
524 item = rb_entry(p, struct btrfs_delayed_item, rb_node); in __btrfs_first_delayed_deletion_item()
537 next = rb_entry(p, struct btrfs_delayed_item, rb_node); in __btrfs_next_delayed_item()
inode-map.c
267 info = rb_entry(n, struct btrfs_free_space, offset_index); in btrfs_unpin_free_ino()
304 info = rb_entry(n, struct btrfs_free_space, offset_index); in recalculate_thresholds()
qgroup.c
118 qgroup = rb_entry(n, struct btrfs_qgroup, node); in find_qgroup_rb()
139 qgroup = rb_entry(parent, struct btrfs_qgroup, node); in add_qgroup_rb()
464 qgroup = rb_entry(n, struct btrfs_qgroup, node); in btrfs_free_qgroup_config()
1387 cur = rb_entry(n, struct btrfs_qgroup_operation, n); in qgroup_oper_exists()
1436 cur = rb_entry(parent, struct btrfs_qgroup_operation, n); in insert_qgroup_oper()
1674 tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n); in qgroup_account_deleted_refs()
1713 tmp_oper = rb_entry(n, struct btrfs_qgroup_operation, n); in qgroup_account_deleted_refs()
2889 qgroup = rb_entry(n, struct btrfs_qgroup, node); in qgroup_rescan_zero_tracking()
send.c
2799 entry = rb_entry(parent, struct orphan_dir_info, node); in add_orphan_dir_info()
2822 entry = rb_entry(n, struct orphan_dir_info, node); in get_orphan_dir_info()
2954 entry = rb_entry(parent, struct waiting_dir_move, node); in add_waiting_dir_move()
2977 entry = rb_entry(n, struct waiting_dir_move, node); in get_waiting_dir_move()
3025 entry = rb_entry(parent, struct pending_dir_move, node); in add_pending_dir_move()
3073 entry = rb_entry(n, struct pending_dir_move, node); in get_pending_dir_moves()
5913 pm = rb_entry(n, struct pending_dir_move, node); in btrfs_ioctl_send()
5930 dm = rb_entry(n, struct waiting_dir_move, node); in btrfs_ioctl_send()
5941 odi = rb_entry(n, struct orphan_dir_info, node); in btrfs_ioctl_send()
file.c
107 entry = rb_entry(parent, struct inode_defrag, rb_node); in __btrfs_add_inode_defrag()
239 entry = rb_entry(parent, struct inode_defrag, rb_node); in btrfs_pick_defrag_inode()
253 entry = rb_entry(parent, struct inode_defrag, rb_node); in btrfs_pick_defrag_inode()
273 defrag = rb_entry(node, struct inode_defrag, rb_node); in btrfs_cleanup_defrag_inodes()
extent-tree.c
160 cache = rb_entry(parent, struct btrfs_block_group_cache, in btrfs_add_block_group_cache()
200 cache = rb_entry(n, struct btrfs_block_group_cache, in block_group_cache_tree_search()
2339 ref = rb_entry(node, struct btrfs_delayed_ref_node, in select_delayed_ref()
2592 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); in find_middle()
2597 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); in find_middle()
2603 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); in find_middle()
2838 head = rb_entry(node, struct btrfs_delayed_ref_head, in btrfs_run_delayed_refs()
2939 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); in check_delayed_ref()
3208 cache = rb_entry(node, struct btrfs_block_group_cache, in next_block_group()
9179 block_group = rb_entry(n, struct btrfs_block_group_cache, in btrfs_free_block_groups()
inode.c
2205 entry = rb_entry(parent, struct sa_defrag_extent_backref, node); in backref_insert()
2639 backref = rb_entry(node, struct sa_defrag_extent_backref, node); in relink_file_extents()
5026 em = rb_entry(node, struct extent_map, rb_node); in evict_inode_truncate_pages()
5045 state = rb_entry(node, struct extent_state, rb_node); in evict_inode_truncate_pages()
5354 entry = rb_entry(parent, struct btrfs_inode, rb_node); in inode_tree_add()
5414 entry = rb_entry(node, struct btrfs_inode, rb_node); in btrfs_invalidate_inodes()
5425 entry = rb_entry(prev, struct btrfs_inode, rb_node); in btrfs_invalidate_inodes()
5434 entry = rb_entry(node, struct btrfs_inode, rb_node); in btrfs_invalidate_inodes()
backref.c
589 node = rb_entry(n, struct btrfs_delayed_ref_node, in __add_delayed_refs()
disk-io.c
4096 head = rb_entry(node, struct btrfs_delayed_ref_head, in btrfs_destroy_delayed_refs()
4110 ref = rb_entry(node, struct btrfs_delayed_ref_node, in btrfs_destroy_delayed_refs()
transaction.c
90 state = rb_entry(node, struct extent_state, rb_node); in clear_btree_io_tree()
volumes.c
1383 em = rb_entry(n, struct extent_map, rb_node); in find_next_chunk()
/linux-4.1.27/drivers/block/drbd/
drbd_interval.c
11 struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb); in interval_end()
56 rb_entry(*new, struct drbd_interval, rb); in drbd_insert_interval()
97 rb_entry(node, struct drbd_interval, rb); in drbd_contains_interval()
144 rb_entry(node, struct drbd_interval, rb); in drbd_find_overlap()
173 i = rb_entry(node, struct drbd_interval, rb); in drbd_next_overlap()
/linux-4.1.27/fs/ext4/
block_validity.c
66 entry = rb_entry(parent, struct ext4_system_zone, node); in add_system_zone()
77 new_entry = rb_entry(new_node, struct ext4_system_zone, in add_system_zone()
99 entry = rb_entry(node, struct ext4_system_zone, node); in add_system_zone()
111 entry = rb_entry(node, struct ext4_system_zone, node); in add_system_zone()
130 entry = rb_entry(node, struct ext4_system_zone, node); in debug_print_tree()
209 entry = rb_entry(n, struct ext4_system_zone, node); in ext4_data_block_valid()
extents_status.c
185 es = rb_entry(node, struct extent_status, rb_node); in ext4_es_print_tree()
214 es = rb_entry(node, struct extent_status, rb_node); in __es_tree_search()
228 return node ? rb_entry(node, struct extent_status, rb_node) : in __es_tree_search()
276 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_find_delayed_extent_range()
423 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_try_to_merge_left()
447 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_try_to_merge_right()
641 es = rb_entry(parent, struct extent_status, rb_node); in __es_insert_extent()
808 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_lookup_extent()
907 es = rb_entry(node, struct extent_status, rb_node); in __es_remove_extent()
920 es = rb_entry(node, struct extent_status, rb_node); in __es_remove_extent()
[all …]
dir.c
452 fname = rb_entry(parent, struct fname, rb_hash); in ext4_htree_store_dirent()
575 fname = rb_entry(info->curr_node, struct fname, rb_hash); in ext4_dx_readdir()
583 fname = rb_entry(info->curr_node, struct fname, in ext4_dx_readdir()
mballoc.c
3440 entry = rb_entry(n, struct ext4_free_data, efd_node); in ext4_mb_generate_from_freelist()
4583 entry = rb_entry(parent, struct ext4_free_data, efd_node); in ext4_mb_free_metadata()
4603 entry = rb_entry(node, struct ext4_free_data, efd_node); in ext4_mb_free_metadata()
4615 entry = rb_entry(node, struct ext4_free_data, efd_node); in ext4_mb_free_metadata()
/linux-4.1.27/security/keys/
proc.c
91 struct key *key = rb_entry(n, struct key, serial_node); in key_serial_next()
111 struct key *key = rb_entry(n, struct key, serial_node); in find_ge_key()
134 minkey = rb_entry(n, struct key, serial_node); in find_ge_key()
157 struct key *key = rb_entry(n, struct key, serial_node); in key_node_serial()
180 struct key *key = rb_entry(_p, struct key, serial_node); in proc_keys_show()
275 struct key_user *user = rb_entry(n, struct key_user, node); in __key_user_next()
335 struct key_user *user = rb_entry(_p, struct key_user, node); in proc_key_users_show()
key.c
67 user = rb_entry(parent, struct key_user, node); in key_user_lookup()
158 xkey = rb_entry(parent, struct key, serial_node); in key_alloc_serial()
189 xkey = rb_entry(parent, struct key, serial_node); in key_alloc_serial()
624 key = rb_entry(n, struct key, serial_node); in key_lookup()
gc.c
220 key = rb_entry(cursor, struct key, serial_node); in key_garbage_collector()
/linux-4.1.27/lib/
rbtree_test.c
31 if (key < rb_entry(parent, struct test_node, rb)->key) in insert()
50 child_augmented = rb_entry(node->rb.rb_left, struct test_node, in augment_recompute()
56 child_augmented = rb_entry(node->rb.rb_right, struct test_node, in augment_recompute()
76 parent = rb_entry(rb_parent, struct test_node, rb); in RB_DECLARE_CALLBACKS()
144 struct test_node *node = rb_entry(rb, struct test_node, rb); in check()
170 struct test_node *node = rb_entry(rb, struct test_node, rb); in check_augmented()
timerqueue.c
50 ptr = rb_entry(parent, struct timerqueue_node, node); in timerqueue_add()
81 rb_entry(rbn, struct timerqueue_node, node) : NULL; in timerqueue_del()
/linux-4.1.27/drivers/gpu/drm/
drm_vma_manager.c
169 node = rb_entry(iter, struct drm_vma_offset_node, vm_rb); in drm_vma_offset_lookup_locked()
202 iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb); in _drm_vma_offset_add_rb()
329 entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_allow()
381 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_revoke()
422 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_is_allowed()
/linux-4.1.27/tools/perf/util/
symbol.c
154 curr = rb_entry(nd, struct symbol, rb_node); in symbols__fixup_duplicate()
157 next = rb_entry(nd, struct symbol, rb_node); in symbols__fixup_duplicate()
185 curr = rb_entry(prevnd, struct symbol, rb_node); in symbols__fixup_end()
189 curr = rb_entry(nd, struct symbol, rb_node); in symbols__fixup_end()
208 curr = rb_entry(prevnd, struct map, rb_node); in __map_groups__fixup_end()
212 curr = rb_entry(nd, struct map, rb_node); in __map_groups__fixup_end()
291 pos = rb_entry(next, struct symbol, rb_node); in symbols__delete()
307 s = rb_entry(parent, struct symbol, rb_node); in symbols__insert()
327 struct symbol *s = rb_entry(n, struct symbol, rb_node); in symbols__find()
345 return rb_entry(n, struct symbol, rb_node); in symbols__first()
[all …]
map.c
234 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); in map__fixup_start()
244 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); in map__fixup_end()
432 struct map *pos = rb_entry(next, struct map, rb_node); in maps__delete()
505 struct map *pos = rb_entry(next, struct map, rb_node); in map_groups__flush()
544 struct map *pos = rb_entry(nd, struct map, rb_node); in map_groups__find_symbol_by_name()
581 struct map *pos = rb_entry(nd, struct map, rb_node); in __map_groups__fprintf_maps()
642 struct map *pos = rb_entry(next, struct map, rb_node); in map_groups__fixup_overlappings()
710 struct map *map = rb_entry(nd, struct map, rb_node); in map_groups__clone()
728 m = rb_entry(parent, struct map, rb_node); in maps__insert()
752 m = rb_entry(parent, struct map, rb_node); in maps__find()
[all …]
strlist.h
48 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; in strlist__first()
56 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; in strlist__next()
intlist.h
48 return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; in intlist__first()
56 return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; in intlist__next()
hist.c
171 n = rb_entry(next, struct hist_entry, rb_node); in hists__output_recalc_col_len()
267 n = rb_entry(next, struct hist_entry, rb_node); in hists__decay_entries()
283 n = rb_entry(next, struct hist_entry, rb_node); in hists__delete_entries()
384 he = rb_entry(parent, struct hist_entry, rb_node_in); in add_hist_entry()
966 iter = rb_entry(parent, struct hist_entry, rb_node_in); in hists__collapse_insert_entry()
1037 n = rb_entry(next, struct hist_entry, rb_node_in); in hists__collapse_resort()
1114 iter = rb_entry(parent, struct hist_entry, rb_node); in __hists__insert_output_entry()
1147 n = rb_entry(next, struct hist_entry, rb_node_in); in hists__output_resort()
1202 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); in hists__filter_by_dso()
1236 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); in hists__filter_by_thread()
[all …]
callchain.c
267 rnode = rb_entry(parent, struct callchain_node, rb_node); in rb_insert_callchain()
303 child = rb_entry(n, struct callchain_node, rb_node_in); in __sort_chain_flat()
334 child = rb_entry(n, struct callchain_node, rb_node_in); in __sort_chain_graph_abs()
364 child = rb_entry(n, struct callchain_node, rb_node_in); in __sort_chain_graph_rel()
427 child = rb_entry(n, struct callchain_node, rb_node_in); in create_child()
551 first = rb_entry(p, struct callchain_node, rb_node_in); in split_add_child()
591 rnode = rb_entry(parent, struct callchain_node, rb_node_in); in append_chain_children()
machine.c
97 struct thread *t = rb_entry(nd, struct thread, rb_node); in machine__delete_threads()
152 pos = rb_entry(parent, struct machine, rb_node); in machines__add()
174 struct machine *machine = rb_entry(nd, struct machine, rb_node); in machines__set_symbol_filter()
187 struct machine *machine = rb_entry(nd, struct machine, rb_node); in machines__set_comm_exec()
205 machine = rb_entry(parent, struct machine, rb_node); in machines__find()
259 struct machine *pos = rb_entry(nd, struct machine, rb_node); in machines__process_guests()
286 machine = rb_entry(node, struct machine, rb_node); in machines__set_id_hdr_size()
364 th = rb_entry(parent, struct thread, rb_node); in __machine__findnew_thread()
526 struct machine *pos = rb_entry(nd, struct machine, rb_node); in machines__fprintf_dsos()
548 struct machine *pos = rb_entry(nd, struct machine, rb_node); in machines__fprintf_dsos_buildid()
[all …]
build-id.c
227 struct machine *pos = rb_entry(nd, struct machine, rb_node); in perf_session__write_buildid_table()
266 struct machine *pos = rb_entry(nd, struct machine, rb_node); in dsos__hit_all()
512 struct machine *pos = rb_entry(nd, struct machine, rb_node); in perf_session__cache_build_ids()
533 struct machine *pos = rb_entry(nd, struct machine, rb_node); in perf_session__read_build_ids()
dso.c
504 cache = rb_entry(next, struct dso_cache, rb_node); in dso_cache__free()
521 cache = rb_entry(parent, struct dso_cache, rb_node); in dso_cache__find()
546 cache = rb_entry(parent, struct dso_cache, rb_node); in dso_cache__insert()
791 struct dso *this = rb_entry(*p, struct dso, rb_node); in dso__findlink_by_longname()
1123 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); in dso__fprintf()
comm.c
55 iter = rb_entry(parent, struct comm_str, rb_node); in comm_str__findnew()
symbol.h
72 nd && (pos = rb_entry(nd, struct symbol, rb_node)); \
annotate.c
1094 iter = rb_entry(parent, struct source_line, node); in insert_source_line()
1137 iter = rb_entry(parent, struct source_line, node); in __resort_source_line()
1158 src_line = rb_entry(node, struct source_line, node); in resort_source_line()
1268 src_line = rb_entry(node, struct source_line, node); in print_summary()
thread-stack.c
411 cp = rb_entry(node_parent, struct call_path, rb_node); in call_path__findnew()
event.c
351 struct map *pos = rb_entry(nd, struct map, rb_node); in perf_event__synthesize_modules()
probe-event.c
175 struct map *pos = rb_entry(nd, struct map, rb_node); in kernel_get_module_map()
/linux-4.1.27/net/netfilter/
nft_rbtree.c
45 rbe = rb_entry(parent, struct nft_rbtree_elem, node); in nft_rbtree_lookup()
92 rbe = rb_entry(parent, struct nft_rbtree_elem, node); in __nft_rbtree_insert()
153 rbe = rb_entry(parent, struct nft_rbtree_elem, node); in nft_rbtree_deactivate()
185 rbe = rb_entry(node, struct nft_rbtree_elem, node); in nft_rbtree_walk()
228 rbe = rb_entry(node, struct nft_rbtree_elem, node); in nft_rbtree_destroy()
/linux-4.1.27/arch/powerpc/kernel/
eeh_cache.c
69 piar = rb_entry(n, struct pci_io_addr_range, rb_node); in __eeh_addr_cache_get_device()
116 piar = rb_entry(n, struct pci_io_addr_range, rb_node); in eeh_addr_cache_print()
138 piar = rb_entry(parent, struct pci_io_addr_range, rb_node); in eeh_addr_cache_insert()
242 piar = rb_entry(n, struct pci_io_addr_range, rb_node); in __eeh_addr_cache_rmv_dev()
/linux-4.1.27/net/ceph/
debugfs.c
70 rb_entry(n, struct ceph_pg_pool_info, node); in osdmap_show()
89 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
100 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
127 req = rb_entry(rp, struct ceph_mon_generic_request, node); in monc_show()
153 req = rb_entry(p, struct ceph_osd_request, r_node); in osdc_show()
osdmap.c
402 pg = rb_entry(parent, struct ceph_pg_mapping, node); in __insert_pg_mapping()
425 pg = rb_entry(n, struct ceph_pg_mapping, node); in __lookup_pg_mapping()
466 pi = rb_entry(parent, struct ceph_pg_pool_info, node); in __insert_pg_pool()
486 pi = rb_entry(n, struct ceph_pg_pool_info, node); in __lookup_pg_pool()
524 rb_entry(rbp, struct ceph_pg_pool_info, node); in ceph_pg_poolid_by_name()
661 rb_entry(rb_first(&map->pg_temp), in ceph_osdmap_destroy()
668 rb_entry(rb_first(&map->primary_temp), in ceph_osdmap_destroy()
675 rb_entry(rb_first(&map->pg_pools), in ceph_osdmap_destroy()
osd_client.c
828 req = rb_entry(parent, struct ceph_osd_request, r_node); in __insert_request()
848 req = rb_entry(n, struct ceph_osd_request, r_node); in __lookup_request()
867 req = rb_entry(n, struct ceph_osd_request, r_node); in __lookup_request_ge()
1077 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), in remove_all_osds()
1169 osd = rb_entry(parent, struct ceph_osd, o_node); in __insert_osd()
1188 osd = rb_entry(n, struct ceph_osd, o_node); in __lookup_osd()
1939 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); in reset_changed_osds()
1970 req = rb_entry(p, struct ceph_osd_request, r_node); in kick_requests()
2225 event = rb_entry(parent, struct ceph_osd_event, node); in __insert_event()
2247 event = rb_entry(parent, struct ceph_osd_event, node); in __find_event()
mon_client.c
422 req = rb_entry(n, struct ceph_mon_generic_request, node); in __lookup_generic_req()
442 req = rb_entry(parent, struct ceph_mon_generic_request, node); in __insert_generic_request()
716 req = rb_entry(p, struct ceph_mon_generic_request, node); in __resend_generic_request()
auth_x.c
105 th = rb_entry(parent, struct ceph_x_ticket_handler, node); in get_ticket_handler()
652 rb_entry(p, struct ceph_x_ticket_handler, node); in ceph_x_destroy()
/linux-4.1.27/Documentation/
rbtree.txt
68 individual members may be accessed directly via rb_entry(node, type, member).
185 rb_entry(node, type, member).
191 printk("key=%s\n", rb_entry(node, struct mytype, node)->keystring);
272 node = rb_entry(root->rb_node, struct interval_tree_node, rb);
277 rb_entry(node->rb.rb_left,
296 node = rb_entry(node->rb.rb_right,
313 subtree_last = rb_entry(node->rb.rb_left,
319 subtree_last = rb_entry(node->rb.rb_right,
331 rb_entry(rb, struct interval_tree_node, rb);
343 rb_entry(rb_old, struct interval_tree_node, rb);
[all …]
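
Note: the rbtree.txt excerpts above show the standard traversal idiom: rb_first()/rb_next() visit nodes in sorted order and rb_entry() converts each one. A minimal sketch, reusing the hypothetical struct mytype from the note at the top of this page:

  /* In-order walk; rb_next() returns NULL after the last node. */
  void my_print_all(struct rb_root *root)
  {
          struct rb_node *node;

          for (node = rb_first(root); node; node = rb_next(node))
                  printk("key=%d\n", rb_entry(node, struct mytype, node)->key);
  }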
/linux-4.1.27/fs/ocfs2/
reservations.c
98 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_dump_resv()
153 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_check_resmap()
288 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_resmap_clear_all_resv()
329 tmp = rb_entry(parent, struct ocfs2_alloc_reservation, r_node); in ocfs2_resv_insert()
380 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_find_resv_lhs()
538 next_resv = rb_entry(next, struct ocfs2_alloc_reservation, in __ocfs2_resv_find_window()
575 next_resv = rb_entry(next, in __ocfs2_resv_find_window()
618 prev_resv = rb_entry(prev, struct ocfs2_alloc_reservation, in __ocfs2_resv_find_window()
uptodate.c
152 item = rb_entry(node, struct ocfs2_meta_cache_item, c_node); in ocfs2_purge_copied_metadata_tree()
230 item = rb_entry(n, struct ocfs2_meta_cache_item, c_node); in ocfs2_search_cache_tree()
334 tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node); in __ocfs2_insert_cache_tree()
refcounttree.c
209 tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node); in ocfs2_find_refcount_tree()
234 tmp = rb_entry(parent, struct ocfs2_refcount_tree, in ocfs2_insert_refcount_tree()
545 tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node); in ocfs2_purge_refcount_trees()
/linux-4.1.27/arch/arm/xen/
p2m.c
40 entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); in xen_add_phys_to_mach_entry()
70 entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); in __pfn_to_mfn()
130 p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); in __set_phys_to_machine_multi()
/linux-4.1.27/kernel/locking/
rtmutex_common.h
73 w = rb_entry(lock->waiters_leftmost, struct rt_mutex_waiter, in rt_mutex_top_waiter()
88 return rb_entry(p->pi_waiters_leftmost, struct rt_mutex_waiter, in task_top_pi_waiter()
rtmutex.c
176 entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry); in rt_mutex_enqueue()
215 entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry); in rt_mutex_enqueue_pi()
/linux-4.1.27/tools/perf/
builtin-kmem.c
71 data = rb_entry(*node, struct alloc_stat, node); in insert_alloc_stat()
114 data = rb_entry(*node, struct alloc_stat, node); in insert_caller_stat()
197 data = rb_entry(node, struct alloc_stat, node); in search_alloc_stat()
276 data = rb_entry(*node, struct page_stat, node); in search_page()
332 data = rb_entry(*node, struct page_stat, node); in search_page_alloc_stat()
531 struct alloc_stat *data = rb_entry(next, struct alloc_stat, in __print_slab_result()
599 data = rb_entry(next, struct page_stat, node); in __print_page_result()
718 this = rb_entry(*new, struct alloc_stat, node); in sort_slab_insert()
749 data = rb_entry(node, struct alloc_stat, node); in __sort_slab_result()
763 this = rb_entry(*new, struct page_stat, node); in sort_page_insert()
[all …]
builtin-annotate.c
122 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); in hists__find_annotations()
builtin-diff.c
436 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in); in hists__baseline_only()
462 he = rb_entry(next, struct hist_entry, rb_node_in); in hists__precompute()
builtin-sched.c
1287 data = rb_entry(node, struct work_atoms, node); in perf_sched__sort_lat()
1565 work_list = rb_entry(next, struct work_atoms, node); in perf_sched__lat()
builtin-top.c
354 n = rb_entry(next, struct hist_entry, rb_node); in perf_top__prompt_symbol()
/linux-4.1.27/tools/perf/ui/browsers/
hists.c
61 rb_entry(nd, struct hist_entry, rb_node); in hist_browser__get_folding()
165 struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); in callchain_node__count_rows_rb_tree()
207 struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); in callchain__count_rows()
231 struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); in callchain_node__init_have_children_rb_tree()
271 struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); in callchain__init_have_children()
315 struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); in callchain_node__set_folding_rb_tree()
356 struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); in callchain__set_folding()
384 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); in __hist_browser__set_folding()
453 struct hist_entry *h = rb_entry(browser->b.top, in hist_browser__run()
577 struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); in hist_browser__show_callchain()
[all …]
map.c
23 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); in map_browser__write()
117 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); in map__browse()
annotate.c
292 l = rb_entry(parent, struct browser_disasm_line, rb_node); in disasm_rb_tree__insert()
333 bpos = rb_entry(nd, struct browser_disasm_line, rb_node); in annotate_browser__set_rb_top()
/linux-4.1.27/mm/
interval_tree.c
43 parent = rb_entry(prev->shared.rb.rb_right, in vma_interval_tree_insert_after()
48 parent = rb_entry(parent->shared.rb.rb_left, in vma_interval_tree_insert_after()
mmap.c
362 subtree_gap = rb_entry(vma->vm_rb.rb_left, in vma_compute_subtree_gap()
368 subtree_gap = rb_entry(vma->vm_rb.rb_right, in vma_compute_subtree_gap()
385 vma = rb_entry(nd, struct vm_area_struct, vm_rb); in browse_rb()
428 vma = rb_entry(nd, struct vm_area_struct, vm_rb); in validate_mm_rb()
566 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); in find_vma_links()
581 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); in find_vma_links()
1728 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area()
1737 rb_entry(vma->vm_rb.rb_left, in unmapped_area()
1756 rb_entry(vma->vm_rb.rb_right, in unmapped_area()
1769 vma = rb_entry(rb_parent(prev), in unmapped_area()
[all …]
nommu.c
596 last = rb_entry(lastp, struct vm_region, vm_rb); in validate_nommu_regions()
601 region = rb_entry(p, struct vm_region, vm_rb); in validate_nommu_regions()
602 last = rb_entry(lastp, struct vm_region, vm_rb); in validate_nommu_regions()
631 pregion = rb_entry(parent, struct vm_region, vm_rb); in add_nommu_region()
772 pvma = rb_entry(parent, struct vm_area_struct, vm_rb); in add_vma_to_mm()
801 prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); in add_vma_to_mm()
1349 pregion = rb_entry(rb, struct vm_region, vm_rb); in do_mmap_pgoff()
vmalloc.c
299 va = rb_entry(n, struct vmap_area, rb_node); in __find_vmap_area()
321 tmp_va = rb_entry(parent, struct vmap_area, rb_node); in __insert_vmap_area()
337 prev = rb_entry(tmp, struct vmap_area, rb_node); in __insert_vmap_area()
400 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node); in alloc_vmap_area()
417 tmp = rb_entry(n, struct vmap_area, rb_node); in alloc_vmap_area()
486 cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node); in __free_vmap_area()
2277 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL; in node_to_va()
2300 va = rb_entry(n, struct vmap_area, rb_node); in pvm_find_next_prev()
ksm.c
749 stable_node = rb_entry(root_stable_tree[nid].rb_node, in remove_all_stable_nodes()
1178 stable_node = rb_entry(*new, struct stable_node, node); in stable_tree_search()
1270 stable_node = rb_entry(*new, struct stable_node, node); in stable_tree_insert()
1341 tree_rmap_item = rb_entry(*new, struct rmap_item, node); in unstable_tree_search_insert()
2002 stable_node = rb_entry(node, struct stable_node, node); in ksm_check_stable_tree()
zswap.c
244 entry = rb_entry(node, struct zswap_entry, rbnode); in zswap_rb_search()
267 myentry = rb_entry(parent, struct zswap_entry, rbnode); in zswap_rb_insert()
mempolicy.c
2160 struct sp_node *p = rb_entry(n, struct sp_node, nd); in sp_lookup()
2176 w = rb_entry(prev, struct sp_node, nd); in sp_lookup()
2181 return rb_entry(n, struct sp_node, nd); in sp_lookup()
2194 nd = rb_entry(parent, struct sp_node, nd); in sp_insert()
2395 n = rb_entry(next, struct sp_node, nd); in shared_policy_replace()
2506 n = rb_entry(next, struct sp_node, nd); in mpol_free_shared_policy()
util.c
191 next = rb_entry(rb_parent, in __vma_link_list()
kmemleak.c
411 rb_entry(rb, struct kmemleak_object, rb_node); in lookup_object()
576 parent = rb_entry(rb_parent, struct kmemleak_object, rb_node); in create_object()
memcontrol.c
639 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, in __mem_cgroup_insert_exceeded()
748 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node); in __mem_cgroup_largest_soft_limit_node()
/linux-4.1.27/arch/x86/platform/uv/
uv_irq.c
72 e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list); in uv_set_irq_2_mmr_info()
107 e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); in uv_irq_2_mmr_info()
273 e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); in uv_teardown_irq()
/linux-4.1.27/fs/proc/
task_nommu.c
27 vma = rb_entry(p, struct vm_area_struct, vm_rb); in task_mem()
88 vma = rb_entry(p, struct vm_area_struct, vm_rb); in task_vsize()
106 vma = rb_entry(p, struct vm_area_struct, vm_rb); in task_statm()
212 return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb), in show_map()
nommu.c
82 return nommu_region_show(m, rb_entry(p, struct vm_region, vm_rb)); in nommu_region_list_show()
proc_sysctl.c
115 ctl_node = rb_entry(node, struct ctl_node, node); in find_entry()
149 parent_node = rb_entry(parent, struct ctl_node, node); in insert_entry()
352 ctl_node = rb_entry(node, struct ctl_node, node); in first_usable_entry()
/linux-4.1.27/fs/xfs/
xfs_extent_busy.c
74 busyp = rb_entry(parent, struct xfs_extent_busy, rb_node); in xfs_extent_busy_insert()
123 busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node); in xfs_extent_busy_search()
323 rb_entry(rbp, struct xfs_extent_busy, rb_node); in xfs_extent_busy_reuse()
370 rb_entry(rbp, struct xfs_extent_busy, rb_node); in xfs_extent_busy_trim()
xfs_buf.c
488 bp = rb_entry(parent, struct xfs_buf, b_rbnode); in _xfs_buf_find()
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_verbs_mcast.c
132 mcast = rb_entry(n, struct ipath_mcast, rb_node); in ipath_mcast_find()
178 tmcast = rb_entry(pn, struct ipath_mcast, rb_node); in ipath_mcast_add()
306 mcast = rb_entry(n, struct ipath_mcast, rb_node); in ipath_multicast_detach()
/linux-4.1.27/drivers/char/
mmtimer.c
273 x = rb_entry(parent, struct mmtimer, list); in mmtimer_add_list()
288 if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next, in mmtimer_add_list()
309 x = rb_entry(n->next, struct mmtimer, list); in mmtimer_set_next_timer()
533 base = rb_entry(timers[indx].next, struct mmtimer, list); in mmtimer_interrupt()
567 x = rb_entry(mn->next, struct mmtimer, list); in mmtimer_tasklet()
626 t = rb_entry(n, struct mmtimer, list); in sgi_timer_del()
/linux-4.1.27/fs/logfs/
gc.c
193 cur = rb_entry(parent, struct gc_candidate, rb_node); in add_list()
212 cand = rb_entry(rb_last(&list->rb_tree), struct gc_candidate, rb_node); in add_list()
241 cand = rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node); in get_best_cand()
352 return rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node); in first_in_list()
597 cand = rb_entry(rb_first(&super->s_free_list.rb_tree), in logfs_journal_wl_pass()
705 cand = rb_entry(list->rb_tree.rb_node, struct gc_candidate, in logfs_cleanup_list()
/linux-4.1.27/net/rxrpc/
ar-connection.c
92 bundle = rb_entry(p, struct rxrpc_conn_bundle, node); in rxrpc_get_bundle()
121 bundle = rb_entry(parent, struct rxrpc_conn_bundle, node); in rxrpc_get_bundle()
252 xconn = rb_entry(parent, struct rxrpc_connection, node); in rxrpc_assign_connection_id()
292 xconn = rb_entry(parent, struct rxrpc_connection, node); in rxrpc_assign_connection_id()
316 xcall = rb_entry(parent, struct rxrpc_call, conn_node); in rxrpc_add_call_ID_to_conn()
644 conn = rb_entry(p, struct rxrpc_connection, node); in rxrpc_incoming_connection()
687 conn = rb_entry(p, struct rxrpc_connection, node); in rxrpc_incoming_connection()
774 conn = rb_entry(p, struct rxrpc_connection, node); in rxrpc_find_connection()
ar-call.c
335 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_get_client_call()
367 call = rb_entry(parent, struct rxrpc_call, sock_node); in rxrpc_get_client_call()
490 call = rb_entry(parent, struct rxrpc_call, conn_node); in rxrpc_incoming_call()
588 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_find_server_call()
791 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_release_calls_on_socket()
ar-connevent.c
39 call = rb_entry(p, struct rxrpc_call, conn_node); in rxrpc_abort_calls()
ar-accept.c
342 call = rb_entry(parent, struct rxrpc_call, sock_node); in rxrpc_accept_call()
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_verbs_mcast.c
122 mcast = rb_entry(n, struct qib_mcast, rb_node); in qib_mcast_find()
167 tmcast = rb_entry(pn, struct qib_mcast, rb_node); in qib_mcast_add()
307 mcast = rb_entry(n, struct qib_mcast, rb_node); in qib_multicast_detach()
/linux-4.1.27/drivers/mtd/ubi/
wl.c
161 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); in wl_tree_add()
257 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); in in_wl_tree()
317 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); in find_wl_entry()
324 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); in find_wl_entry()
358 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
359 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
362 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
711 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
732 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
982 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
[all …]
attach.c
270 av = rb_entry(parent, struct ubi_ainf_volume, rb); in add_volume()
481 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); in ubi_add_to_av()
612 av = rb_entry(p, struct ubi_ainf_volume, rb); in ubi_find_av()
639 aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb); in ubi_remove_av()
1148 aeb = rb_entry(this, struct ubi_ainf_peb, u.rb); in destroy_av()
1198 av = rb_entry(rb, struct ubi_ainf_volume, rb); in destroy_ai()
fastmap.c
191 av = rb_entry(parent, struct ubi_ainf_volume, rb); in add_vol()
240 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); in assign_aeb_to_av()
279 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); in update_vol()
383 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb); in process_pool_aeb()
423 av = rb_entry(node, struct ubi_ainf_volume, rb); in unmap_peb()
427 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb); in unmap_peb()
fastmap-wl.c
358 e = rb_entry(rb_next(root->rb_node), in may_reserve_for_fm()
eba.c
105 le = rb_entry(p, struct ubi_ltree_entry, rb); in ltree_lookup()
171 le1 = rb_entry(parent, struct ubi_ltree_entry, rb); in ltree_add_entry()
/linux-4.1.27/security/integrity/
iint.c
39 iint = rb_entry(n, struct integrity_iint_cache, rb_node); in __integrity_iint_find()
111 test_iint = rb_entry(parent, struct integrity_iint_cache, in integrity_inode_get()
/linux-4.1.27/tools/perf/ui/stdio/
hist.c
100 child = rb_entry(node, struct callchain_node, rb_node); in __callchain__fprintf_graph()
178 cnode = rb_entry(node, struct callchain_node, rb_node); in callchain__fprintf_graph()
248 chain = rb_entry(rb_node, struct callchain_node, rb_node); in callchain__fprintf_flat()
462 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); in hists__fprintf()
/linux-4.1.27/fs/ubifs/
log.c
50 bud = rb_entry(p, struct ubifs_bud, rb); in ubifs_search_bud()
83 bud = rb_entry(p, struct ubifs_bud, rb); in ubifs_get_wbuf()
134 b = rb_entry(parent, struct ubifs_bud, rb); in ubifs_add_bud()
314 bud = rb_entry(p1, struct ubifs_bud, rb); in remove_buds()
559 dr = rb_entry(parent, struct done_ref, rb); in done_already()
orphan.c
85 o = rb_entry(parent, struct ubifs_orphan, rb); in ubifs_add_orphan()
123 o = rb_entry(p, struct ubifs_orphan, rb); in ubifs_delete_orphan()
525 o = rb_entry(parent, struct ubifs_orphan, rb); in insert_dead_orphan()
755 o = rb_entry(p, struct ubifs_orphan, rb); in dbg_find_orphan()
782 o = rb_entry(parent, struct check_orphan, rb); in dbg_ins_check_orphan()
804 o = rb_entry(p, struct check_orphan, rb); in dbg_find_check_orphan()
recovery.c
1266 e = rb_entry(parent, struct size_entry, rb); in add_ino()
1299 e = rb_entry(p, struct size_entry, rb); in find_ino()
1483 e = rb_entry(this, struct size_entry, rb); in ubifs_recover_size()
debug.c
623 bud = rb_entry(rb, struct ubifs_bud, rb); in ubifs_dump_budg()
711 bud = rb_entry(rb, struct ubifs_bud, rb); in ubifs_dump_lprop()
1807 fscki = rb_entry(parent, struct fsck_inode, rb); in add_inode()
1885 fscki = rb_entry(p, struct fsck_inode, rb); in search_inode()
2154 fscki = rb_entry(this, struct fsck_inode, rb); in check_inodes()
tnc_commit.c
175 o = rb_entry(p, struct ubifs_old_idx, rb); in find_old_idx()
tnc.c
91 o = rb_entry(parent, struct ubifs_old_idx, rb); in insert_old_idx()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
cm.c
151 rb_entry(node, struct id_map_entry, node); in id_map_find_by_sl_id()
230 ent = rb_entry(parent, struct id_map_entry, node); in sl_id_map_add()
441 rb_entry(rb_first(sl_id_map), in mlx4_ib_cm_paravirt_clean()
453 rb_entry(nd, struct id_map_entry, node); in mlx4_ib_cm_paravirt_clean()
mcg.c
166 group = rb_entry(node, struct mcast_group, node); in mcast_find()
189 cur_group = rb_entry(parent, struct mcast_group, node); in mcast_insert()
1102 group = rb_entry(p, struct mcast_group, node); in _mlx4_ib_mcg_port_cleanup()
1233 group = rb_entry(p, struct mcast_group, node); in clean_vf_mcast()
/linux-4.1.27/drivers/base/regmap/
regcache-rbtree.c
242 rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); in regcache_rbtree_exit()
418 rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, in regcache_rbtree_write()
468 rbnode = rb_entry(node, struct regcache_rbtree_node, node); in regcache_rbtree_sync()
508 rbnode = rb_entry(node, struct regcache_rbtree_node, node); in regcache_rbtree_drop()
regmap-debugfs.c
542 range_node = rb_entry(next, struct regmap_range_node, node); in regmap_debugfs_init()
regmap.c
421 range_node = rb_entry(next, struct regmap_range_node, node); in regmap_range_exit()
/linux-4.1.27/fs/afs/
callback.c
48 vnode = rb_entry(server->cb_promises.rb_node, in afs_init_callback_state()
153 vnode = rb_entry(p, struct afs_vnode, server_rb); in afs_break_one_callback()
384 vnode = rb_entry(rb_first(&server->cb_promises),
server.c
48 xserver = rb_entry(p, struct afs_server, master_rb); in afs_install_server()
193 server = rb_entry(p, struct afs_server, master_rb); in afs_find_server()
vnode.c
32 vnode = rb_entry(node, struct afs_vnode, cb_promise);
85 xvnode = rb_entry(parent, struct afs_vnode, server_rb); in afs_install_vnode()
156 xvnode = rb_entry(parent, struct afs_vnode, cb_promise); in afs_vnode_note_promise()
/linux-4.1.27/kernel/power/
wakelock.c
46 wl = rb_entry(node, struct wakelock, node); in pm_show_wakelocks()
148 wl = rb_entry(*node, struct wakelock, node); in wakelock_lookup_add()
swap.c
130 ext = rb_entry(*new, struct swsusp_extent, node); in swsusp_extents_insert()
/linux-4.1.27/drivers/staging/android/ion/
ion.c
159 entry = rb_entry(parent, struct ion_buffer, node); in ion_buffer_add()
408 struct ion_handle *entry = rb_entry(n, struct ion_handle, node); in ion_handle_lookup()
456 entry = rb_entry(parent, struct ion_handle, node); in ion_handle_add()
690 struct ion_handle *handle = rb_entry(n, struct ion_handle, in ion_debug_client_show()
728 struct ion_client *client = rb_entry(node, struct ion_client, in ion_get_client_serial()
791 entry = rb_entry(parent, struct ion_client, node); in ion_client_create()
834 struct ion_handle *handle = rb_entry(n, struct ion_handle, in ion_client_destroy()
1385 struct ion_handle *handle = rb_entry(n, in ion_debug_heap_total()
1407 struct ion_client *client = rb_entry(n, struct ion_client, in ion_debug_heap_show()
1428 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, in ion_debug_heap_show()
/linux-4.1.27/net/core/
gen_estimator.c
158 e = rb_entry(parent, struct gen_estimator, node); in gen_add_node()
178 e = rb_entry(p, struct gen_estimator, node); in gen_find_node()
/linux-4.1.27/drivers/android/
binder.c
485 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_free_buffer()
510 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_allocated_buffer()
535 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_buffer_lookup()
686 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_buf()
706 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); in binder_alloc_buf()
874 node = rb_entry(n, struct binder_node, rb_node); in binder_get_node()
896 node = rb_entry(parent, struct binder_node, rb_node); in binder_new_node()
1012 ref = rb_entry(n, struct binder_ref, rb_node_desc); in binder_get_ref()
1034 ref = rb_entry(parent, struct binder_ref, rb_node_node); in binder_get_ref_for_node()
1055 ref = rb_entry(n, struct binder_ref, rb_node_desc); in binder_get_ref_for_node()
[all …]
/linux-4.1.27/fs/ext2/
balloc.c
213 rsv = rb_entry(n, struct ext2_reserve_window_node, rsv_node); in __rsv_window_dump()
300 rsv = rb_entry(n, struct ext2_reserve_window_node, rsv_node); in search_reserve_window()
317 rsv = rb_entry(n, struct ext2_reserve_window_node, rsv_node); in search_reserve_window()
343 this = rb_entry(parent, struct ext2_reserve_window_node, rsv_node); in ext2_rsv_window_add()
813 rsv = rb_entry(next,struct ext2_reserve_window_node,rsv_node); in find_next_reservable_window()
1055 next_rsv = rb_entry(next, struct ext2_reserve_window_node, rsv_node); in try_to_extend_reservation()
/linux-4.1.27/fs/ext3/
dir.c
373 fname = rb_entry(parent, struct fname, rb_hash); in ext3_htree_store_dirent()
493 fname = rb_entry(info->curr_node, struct fname, rb_hash); in ext3_dx_readdir()
501 fname = rb_entry(info->curr_node, struct fname, in ext3_dx_readdir()
balloc.c
221 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); in __rsv_window_dump()
308 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); in search_reserve_window()
325 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node); in search_reserve_window()
352 this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node); in ext3_rsv_window_add()
1027 rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node); in find_next_reservable_window()
1275 next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node); in try_to_extend_reservation()
/linux-4.1.27/drivers/mtd/
mtdswap.c
92 #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
94 #define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
226 cur = rb_entry(parent, struct swap_eb, rb); in __mtdswap_rb_add()
453 median = rb_entry(medrb, struct swap_eb, rb)->erase_count; in mtdswap_check_counts()
625 eb = rb_entry(rb_first(clean_root), struct swap_eb, rb); in mtdswap_map_free_block()
910 eb = rb_entry(rb_first(rp), struct swap_eb, rb); in mtdswap_pick_gc_eblk()
1238 min[i] = rb_entry(rb_first(root), struct swap_eb, in mtdswap_show()
1240 max[i] = rb_entry(rb_last(root), struct swap_eb, in mtdswap_show()
/linux-4.1.27/net/rds/
cong.c
113 map = rb_entry(parent, struct rds_cong_map, m_rb_node); in rds_cong_tree_walk()
398 map = rb_entry(node, struct rds_cong_map, m_rb_node); in rds_cong_exit()
rdma.c
75 mr = rb_entry(parent, struct rds_mr, r_rb_node); in rds_mr_tree_walk()
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c
116 tfp = rb_entry(parent, struct fw_page, rb_node); in insert_page()
151 tfp = rb_entry(tmp, struct fw_page, rb_node); in find_fw_page()
490 fwp = rb_entry(p, struct fw_page, rb_node); in mlx5_reclaim_startup_pages()
/linux-4.1.27/tools/lib/lockdep/
preload.c
107 l = rb_entry(*node, struct lock_lookup, node); in __get_lock_node()
184 return rb_entry(*node, struct lock_lookup, node); in __get_lock()
/linux-4.1.27/tools/perf/ui/gtk/
hists.c
106 node = rb_entry(nd, struct callchain_node, rb_node); in perf_gtk__add_callchain()
224 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); in perf_gtk__show_hists()
/linux-4.1.27/fs/fscache/
object-list.c
59 xobj = rb_entry(parent, struct fscache_object, objlist_link); in fscache_objlist_add()
114 obj = rb_entry(p, struct fscache_object, objlist_link); in fscache_objlist_lookup()
/linux-4.1.27/fs/dlm/
debug_fs.c
458 r = rb_entry(node, struct dlm_rsb, res_hashnode); in table_seq_start()
490 r = rb_entry(node, struct dlm_rsb, res_hashnode); in table_seq_start()
524 r = rb_entry(next, struct dlm_rsb, res_hashnode); in table_seq_next()
555 r = rb_entry(next, struct dlm_rsb, res_hashnode); in table_seq_next()
recover.c
907 r = rb_entry(n, struct dlm_rsb, res_hashnode); in dlm_create_root_list()
944 r = rb_entry(n, struct dlm_rsb, res_hashnode); in dlm_clear_toss()
lockspace.c
818 rsb = rb_entry(n, struct dlm_rsb, res_hashnode); in release_lockspace()
824 rsb = rb_entry(n, struct dlm_rsb, res_hashnode); in release_lockspace()
lock.c
455 r = rb_entry(node, struct dlm_rsb, res_hashnode); in dlm_search_rsb_tree()
479 struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb, in rsb_insert()
1095 r = rb_entry(n, struct dlm_rsb, res_hashnode); in dlm_dump_rsb_hash()
1672 r = rb_entry(n, struct dlm_rsb, res_hashnode); in shrink_bucket()
5477 r = rb_entry(n, struct dlm_rsb, res_hashnode); in find_grant_rsb()
/linux-4.1.27/arch/sh/kernel/
dwarf.c
326 cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node); in dwarf_lookup_cie()
362 fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node); in dwarf_lookup_fde()
842 cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node); in dwarf_parse_cie()
928 fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node); in dwarf_parse_fde()
/linux-4.1.27/fs/
eventpoll.c
757 epi = rb_entry(rbp, struct epitem, rbn); in ep_free()
773 epi = rb_entry(rbp, struct epitem, rbn); in ep_free()
880 struct epitem *epi = rb_entry(rbp, struct epitem, rbn); in ep_show_fdinfo()
979 epi = rb_entry(rbp, struct epitem, rbn); in ep_find()
1115 epic = rb_entry(parent, struct epitem, rbn); in ep_rbtree_insert()
1692 epi = rb_entry(rbp, struct epitem, rbn); in ep_loop_check_proc()
/linux-4.1.27/kernel/sched/
deadline.c
169 entry = rb_entry(parent, struct task_struct, in enqueue_pushable_dl_task()
780 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node); in dec_dl_deadline()
834 entry = rb_entry(parent, struct sched_dl_entity, rb_node); in __enqueue_dl_entity()
1091 return rb_entry(left, struct sched_dl_entity, rb_node); in pick_next_dl_entity()
1221 dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node); in pick_next_earliest_dl_task()
1374 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost, in pick_next_pushable_dl_task()
fair.c
470 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost, in update_min_vruntime()
503 entry = rb_entry(parent, struct sched_entity, run_node); in __enqueue_entity()
546 return rb_entry(left, struct sched_entity, run_node); in __pick_first_entity()
556 return rb_entry(next, struct sched_entity, run_node); in __pick_next_entity()
567 return rb_entry(last, struct sched_entity, run_node); in __pick_last_entity()
/linux-4.1.27/net/sched/
sch_hfsc.c
202 cl1 = rb_entry(parent, struct hfsc_class, el_node); in eltree_insert()
233 p = rb_entry(n, struct hfsc_class, el_node); in eltree_get_mindl()
251 return rb_entry(n, struct hfsc_class, el_node); in eltree_get_minel()
267 cl1 = rb_entry(parent, struct hfsc_class, vt_node); in vttree_insert()
297 p = rb_entry(n, struct hfsc_class, vt_node); in vttree_firstfit()
336 cl1 = rb_entry(parent, struct hfsc_class, cf_node); in cftree_insert()
668 p = rb_entry(n, struct hfsc_class, cf_node); in update_cfmin()
691 max_cl = rb_entry(n, struct hfsc_class, vt_node); in init_vf()
sch_htb.c
277 c = rb_entry(parent, struct htb_class, node[prio]); in htb_add_to_id_tree()
311 c = rb_entry(parent, struct htb_class, pq_node); in htb_add_to_wait_tree()
707 cl = rb_entry(p, struct htb_class, pq_node); in htb_do_events()
736 rb_entry(n, struct htb_class, node[prio]); in htb_id_find_next_upper()
796 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]); in htb_lookup_leaf()
/linux-4.1.27/drivers/vfio/
vfio_iommu_type1.c
96 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); in vfio_find_dma()
116 dma = rb_entry(parent, struct vfio_dma, node); in vfio_link_dma()
660 dma = rb_entry(n, struct vfio_dma, node); in vfio_iommu_replay()
846 vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node)); in vfio_iommu_unmap_unpin_all()
/linux-4.1.27/fs/gfs2/
rgrp.c
511 cur = rb_entry(n, struct gfs2_rgrpd, rd_node); in gfs2_blk2rgrpd()
548 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); in gfs2_rgrpd_get_first()
575 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); in gfs2_rgrpd_get_next()
713 rs = rb_entry(n, struct gfs2_blkreserv, rs_node); in return_all_reservations()
726 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); in gfs2_clear_rgrpd()
870 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd, in rgd_insert()
1453 rb_entry(*newn, struct gfs2_blkreserv, rs_node); in rs_insert()
1550 rs = rb_entry(n, struct gfs2_blkreserv, rs_node); in gfs2_next_unreserved_block()
1566 rs = rb_entry(n, struct gfs2_blkreserv, rs_node); in gfs2_next_unreserved_block()
2217 trs = rb_entry(n, struct gfs2_blkreserv, rs_node); in gfs2_rgrp_dump()
/linux-4.1.27/fs/ceph/
Dxattr.c353 xattr = rb_entry(parent, struct ceph_inode_xattr, node); in __set_xattr()
446 xattr = rb_entry(parent, struct ceph_inode_xattr, node); in __get_xattr()
522 xattr = rb_entry(p, struct ceph_inode_xattr, node); in __copy_xattr_names()
546 xattr = rb_entry(p, struct ceph_inode_xattr, node); in __ceph_destroy_xattrs()
692 xattr = rb_entry(p, struct ceph_inode_xattr, node); in __ceph_build_xattrs_blob()
Dcaps.c316 cap = rb_entry(n, struct ceph_cap, ci_node); in __get_cap_for_mds()
348 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_get_cap_mds()
380 cap = rb_entry(parent, struct ceph_cap, ci_node); in __insert_cap_node()
676 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_caps_issued()
707 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_caps_issued_other()
757 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_caps_issued_mask()
784 cap = rb_entry(q, struct ceph_cap, in __ceph_caps_issued_mask()
808 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_caps_revoking_other()
868 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_caps_mds_wanted()
1105 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node); in ceph_queue_caps_release()
[all …]
Dinode.c127 frag = rb_entry(parent, struct ceph_inode_frag, node); in __get_or_create_frag()
166 rb_entry(n, struct ceph_inode_frag, node); in __ceph_find_frag()
323 frag = rb_entry(rb_node, struct ceph_inode_frag, node); in ceph_fill_fragtree()
341 frag = rb_entry(rb_node, struct ceph_inode_frag, node); in ceph_fill_fragtree()
363 frag = rb_entry(rb_node, struct ceph_inode_frag, node); in ceph_fill_fragtree()
510 frag = rb_entry(n, struct ceph_inode_frag, node); in ceph_destroy_inode()
Ddebugfs.c61 req = rb_entry(rp, struct ceph_mds_request, r_node); in mdsc_show()
Dmds_client.c576 req = rb_entry(n, struct ceph_mds_request, r_node); in __lookup_request()
598 req = rb_entry(parent, struct ceph_mds_request, r_node); in __insert_request()
793 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); in __choose_mds()
1042 req = rb_entry(p, struct ceph_mds_request, r_node); in cleanup_session_requests()
1696 return rb_entry(rb_first(&mdsc->request_tree), in __get_oldest_req()
2209 req = rb_entry(p, struct ceph_mds_request, r_node); in kick_requests()
2701 req = rb_entry(p, struct ceph_mds_request, r_node); in replay_unsafe_requests()
2932 rb_entry(p, struct ceph_snap_realm, node); in send_mds_reconnect()
3485 nextreq = rb_entry(n, struct ceph_mds_request, r_node); in wait_unsafe_requests()
Dsnap.c89 r = rb_entry(parent, struct ceph_snap_realm, node); in __insert_snap_realm()
142 r = rb_entry(n, struct ceph_snap_realm, node); in __lookup_snap_realm()
/linux-4.1.27/net/802/
Dgarp.c158 attr = rb_entry(parent, struct garp_attr, node); in garp_attr_lookup()
179 attr = rb_entry(parent, struct garp_attr, node); in garp_attr_create()
391 attr = rb_entry(node, struct garp_attr, node); in garp_gid_event()
Dmrp.c247 attr = rb_entry(parent, struct mrp_attr, node); in mrp_attr_lookup()
268 attr = rb_entry(parent, struct mrp_attr, node); in mrp_attr_create()
577 attr = rb_entry(node, struct mrp_attr, node); in mrp_mad_event()
/linux-4.1.27/fs/cachefiles/
Dnamei.c111 object = rb_entry(p, struct cachefiles_object, active_node); in cachefiles_mark_object_buried()
168 xobject = rb_entry(_parent, in cachefiles_mark_object_active()
857 object = rb_entry(_n, struct cachefiles_object, active_node); in cachefiles_check_active()
/linux-4.1.27/kernel/events/
Duprobes.c394 uprobe = rb_entry(n, struct uprobe, rb_node); in __find_uprobe()
433 u = rb_entry(parent, struct uprobe, rb_node); in __insert_uprobe()
999 struct uprobe *u = rb_entry(n, struct uprobe, rb_node); in find_node_in_range()
1038 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
1045 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
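
find_node_in_range() above is a range variant of the descent: compare against both bounds and stop at any node whose key lies within [min, max] (uprobes keys on inode and offset; the sketch below uses the illustrative integer-keyed struct foo):

	static struct foo *foo_find_in_range(struct rb_root *root, int min, int max)
	{
		struct rb_node *n = root->rb_node;

		while (n) {
			struct foo *f = rb_entry(n, struct foo, node);

			if (max < f->key)
				n = n->rb_left;
			else if (min > f->key)
				n = n->rb_right;
			else
				return f;	/* min <= f->key <= max */
		}
		return NULL;
	}
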
Dcore.c4370 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
4384 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
4408 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
4506 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
7551 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
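
Note that the core.c hits match a different rb_entry: a struct list_head member linking a perf event into its ring buffer's event_list, manipulated with the RCU list primitives rather than the rbtree macro. A hedged sketch of that pattern, with invented names (bar_buffer, bar_event):

	#include <linux/rculist.h>
	#include <linux/rcupdate.h>

	struct bar_buffer {
		struct list_head event_list;
	};

	struct bar_event {
		struct list_head rb_entry;	/* links into bar_buffer.event_list */
	};

	/* writer side: attach under the appropriate lock */
	static void bar_attach(struct bar_event *ev, struct bar_buffer *rb)
	{
		list_add_rcu(&ev->rb_entry, &rb->event_list);
	}

	/* reader side: walk the list under rcu_read_lock() */
	static void bar_wakeup(struct bar_buffer *rb)
	{
		struct bar_event *ev;

		rcu_read_lock();
		list_for_each_entry_rcu(ev, &rb->event_list, rb_entry)
			(void)ev;	/* per-event work (e.g. waking waiters) goes here */
		rcu_read_unlock();
	}
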
/linux-4.1.27/drivers/infiniband/core/
Dmulticast.c138 group = rb_entry(node, struct mcast_group, node); in mcast_find()
162 cur_group = rb_entry(parent, struct mcast_group, node); in mcast_insert()
763 group = rb_entry(node, struct mcast_group, node); in mcast_groups_event()
Dcm.c510 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, in cm_insert_listen()
547 cm_id_priv = rb_entry(node, struct cm_id_private, service_node); in cm_find_listen()
582 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, in cm_insert_remote_id()
608 timewait_info = rb_entry(node, struct cm_timewait_info, in cm_find_remote_id()
635 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, in cm_insert_remote_qpn()
665 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, in cm_insert_remote_sidr()
Duverbs_cmd.c660 scan = rb_entry(parent, struct xrcd_table_entry, node); in xrcd_table_insert()
685 entry = rb_entry(p, struct xrcd_table_entry, node); in xrcd_table_search()
/linux-4.1.27/fs/f2fs/
Ddata.c452 en = rb_entry(node, struct extent_node, rb_node); in __lookup_extent_tree()
476 prev = rb_entry(node, struct extent_node, rb_node); in __try_back_merge()
497 next = rb_entry(node, struct extent_node, rb_node); in __try_front_merge()
516 en = rb_entry(parent, struct extent_node, rb_node); in __insert_extent_tree()
554 en = rb_entry(node, struct extent_node, rb_node); in __free_extent_tree()
749 en = rb_entry(node, struct extent_node, rb_node); in f2fs_preserve_extent_tree()
/linux-4.1.27/drivers/infiniband/ulp/ipoib/
Dipoib_multicast.c165 mcast = rb_entry(n, struct ipoib_mcast, rb_node); in __ipoib_mcast_find()
190 tmcast = rb_entry(pn, struct ipoib_mcast, rb_node); in __ipoib_mcast_add()
964 mcast = rb_entry(n, struct ipoib_mcast, rb_node); in ipoib_mcast_iter_next()
Dipoib_main.c264 path = rb_entry(n, struct ipoib_path, rb_node); in __path_find()
290 tpath = rb_entry(pn, struct ipoib_path, rb_node); in __path_add()
361 path = rb_entry(n, struct ipoib_path, rb_node); in ipoib_path_iter_next()
/linux-4.1.27/fs/nfs/
Dnfs4state.c180 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); in nfs4_get_renew_cred_server_locked()
395 sp = rb_entry(parent, struct nfs4_state_owner, so_server_node); in nfs4_find_state_owner_locked()
422 sp = rb_entry(parent, struct nfs4_state_owner, so_server_node); in nfs4_insert_state_owner_locked()
1563 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); in nfs4_reset_seqids()
1613 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); in nfs4_clear_reclaim_server()
1724 sp = rb_entry(pos, in nfs4_do_reclaim()
Dfscache.c103 xkey = rb_entry(parent, struct nfs_fscache_key, node); in nfs_fscache_get_super_cookie()
Ddir.c2196 entry = rb_entry(n, struct nfs_access_entry, rb_node); in __nfs_access_zap_cache()
2228 entry = rb_entry(n, struct nfs_access_entry, rb_node); in nfs_access_search_rbtree()
2318 entry = rb_entry(parent, struct nfs_access_entry, rb_node); in nfs_access_add_rbtree()
/linux-4.1.27/fs/nilfs2/
Dthe_nilfs.c727 root = rb_entry(n, struct nilfs_root, rb_node); in nilfs_lookup_root()
766 root = rb_entry(parent, struct nilfs_root, rb_node); in nilfs_find_or_create_root()
/linux-4.1.27/block/
Delevator.c305 __rq = rb_entry(parent, struct request, rb_node); in elv_rb_add()
332 rq = rb_entry(n, struct request, rb_node); in elv_rb_find()
Dcfq-iosched.c68 #define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
1174 return rb_entry(root->left, struct cfq_queue, rb_node); in cfq_rb_first()
2037 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); in cfq_service_tree_add()
2075 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); in cfq_service_tree_add()
2114 cfqq = rb_entry(parent, struct cfq_queue, p_node); in cfq_prio_tree_lookup()
2609 __cfqq = rb_entry(parent, struct cfq_queue, p_node); in cfqq_close()
2620 __cfqq = rb_entry(node, struct cfq_queue, p_node); in cfqq_close()
Dblk-throttle.c84 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
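
rb_entry_cfqg and rb_entry_tg above show a common idiom: wrapping rb_entry() in a subsystem-local macro so call sites stay short. The same shape for the illustrative struct foo:

	#define rb_entry_foo(n)	rb_entry((n), struct foo, node)

	/* usage: struct foo *f = rb_entry_foo(rb_first(root)); */
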
/linux-4.1.27/drivers/block/
Dpktcdvd.c627 return rb_entry(n, struct pkt_rb_node, rb_node); in pkt_rbtree_next()
653 tmp = rb_entry(n, struct pkt_rb_node, rb_node); in pkt_rbtree_find()
684 tmp = rb_entry(parent, struct pkt_rb_node, rb_node); in pkt_rbtree_insert()
1218 first_node = rb_entry(n, struct pkt_rb_node, rb_node); in pkt_handle_queue()
1236 node = rb_entry(n, struct pkt_rb_node, rb_node); in pkt_handle_queue()
/linux-4.1.27/drivers/xen/
Devtchn.c530 evtchn = rb_entry(node, struct user_evtchn, node); in evtchn_release()
/linux-4.1.27/drivers/macintosh/
Dvia-pmu.c2032 struct rb_entry { struct
2064 struct rb_entry *rp = &pp->rb_buf[pp->rb_put]; in pmu_pass_intr()
2121 struct rb_entry *rp = &pp->rb_buf[i]; in pmu_read()
/linux-4.1.27/fs/nfs/blocklayout/
Dextent_tree.c14 return rb_entry(node, struct pnfs_block_extent, be_node); in ext_node()
/linux-4.1.27/net/netfilter/ipset/
Dip_set_hash_netiface.c47 #define iface_data(n) (rb_entry(n, struct iface_node, node)->iface)
/linux-4.1.27/ipc/
Dmqueue.c129 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); in msg_insert()
183 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); in msg_get()
/linux-4.1.27/arch/blackfin/kernel/
Dtrace.c131 vma = rb_entry(n, struct vm_area_struct, vm_rb); in decode_address()
/linux-4.1.27/fs/cifs/
Dconnect.c3819 tlink = rb_entry(node, struct tcon_link, tl_rbnode); in cifs_umount()
3970 tlink = rb_entry(node, struct tcon_link, tl_rbnode); in tlink_rb_search()
3990 tlink = rb_entry(*new, struct tcon_link, tl_rbnode); in tlink_rb_insert()
4118 tlink = rb_entry(tmp, struct tcon_link, tl_rbnode); in cifs_prune_tlinks()
/linux-4.1.27/net/wireless/
Dscan.c640 tbss = rb_entry(parent, struct cfg80211_internal_bss, rbn); in rb_insert_bss()
669 bss = rb_entry(n, struct cfg80211_internal_bss, rbn); in rb_find_bss()
/linux-4.1.27/fs/ocfs2/cluster/
Dnodemanager.c90 node = rb_entry(parent, struct o2nm_node, nd_ip_node); in o2nm_node_ip_tree_lookup()
Dtcp.c776 nmh = rb_entry(parent, struct o2net_msg_handler, nh_node); in o2net_handler_tree_lookup()
/linux-4.1.27/fs/kernfs/
Ddir.c25 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
/linux-4.1.27/fs/fuse/
Dfile.c2660 ff = rb_entry(last, struct fuse_file, polled_node); in fuse_find_polled_node()
2756 ff = rb_entry(*link, struct fuse_file, polled_node); in fuse_notify_poll_wakeup()
Ddev.c2155 ff = rb_entry(p, struct fuse_file, polled_node); in end_polls()
/linux-4.1.27/drivers/md/
Ddm-crypt.c1174 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
Ddm-thin.c1709 #define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
/linux-4.1.27/fs/nfsd/
Dnfs4state.c2041 clp = rb_entry(*new, struct nfs4_client, cl_namenode); in add_clp_to_name_tree()
2062 clp = rb_entry(node, struct nfs4_client, cl_namenode); in find_clp_in_name_tree()
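
add_clp_to_name_tree() and find_clp_in_name_tree() above, like cm_insert_listen() earlier, combine the two walks: an insert that also detects an existing entry with the same key and hands it back instead of linking a duplicate. A final sketch, reusing the illustrative struct foo:

	static struct foo *foo_insert_unique(struct rb_root *root, struct foo *new)
	{
		struct rb_node **p = &root->rb_node, *parent = NULL;

		while (*p) {
			struct foo *f = rb_entry(*p, struct foo, node);

			parent = *p;
			if (new->key < f->key)
				p = &(*p)->rb_left;
			else if (new->key > f->key)
				p = &(*p)->rb_right;
			else
				return f;	/* key already present; caller keeps f */
		}
		rb_link_node(&new->node, parent, p);
		rb_insert_color(&new->node, root);
		return NULL;	/* inserted */
	}
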