Searched refs:rb_entry (Results 1 – 190 of 190) sorted by relevance

/linux-4.4.14/tools/perf/tests/
hists_output.c 110 he = rb_entry(node, struct hist_entry, rb_node); in del_hist_entries()
168 he = rb_entry(node, struct hist_entry, rb_node); in test1()
174 he = rb_entry(node, struct hist_entry, rb_node); in test1()
180 he = rb_entry(node, struct hist_entry, rb_node); in test1()
186 he = rb_entry(node, struct hist_entry, rb_node); in test1()
192 he = rb_entry(node, struct hist_entry, rb_node); in test1()
198 he = rb_entry(node, struct hist_entry, rb_node); in test1()
204 he = rb_entry(node, struct hist_entry, rb_node); in test1()
210 he = rb_entry(node, struct hist_entry, rb_node); in test1()
216 he = rb_entry(node, struct hist_entry, rb_node); in test1()
[all …]
vmlinux-kallsyms.c 115 sym = rb_entry(nd, struct symbol, rb_node); in test__vmlinux_matches_kallsyms()
159 struct symbol *next = rb_entry(nnd, struct symbol, rb_node); in test__vmlinux_matches_kallsyms()
hists_common.c 173 he = rb_entry(node, struct hist_entry, rb_node_in); in print_hists_in()
200 he = rb_entry(node, struct hist_entry, rb_node); in print_hists_out()
hists_cumulate.c 144 he = rb_entry(node, struct hist_entry, rb_node); in del_hist_entries()
205 node && (he = rb_entry(node, struct hist_entry, rb_node)); in do_test()
224 cnode = rb_entry(rb_first(root), struct callchain_node, rb_node); in do_test()
hists_link.c 169 he = rb_entry(node, struct hist_entry, rb_node_in); in __validate_match()
221 he = rb_entry(node, struct hist_entry, rb_node_in); in __validate_link()
/linux-4.4.14/include/linux/
interval_tree_generic.h 49 subtree_last = rb_entry(node->ITRB.rb_left, \
55 subtree_last = rb_entry(node->ITRB.rb_right, \
76 parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
113 ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
132 node = rb_entry(node->ITRB.rb_right, \
149 node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
169 ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
181 node = rb_entry(rb, ITSTRUCT, ITRB); \
rbtree_augmented.h 69 rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
80 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
81 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
87 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
88 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
rbtree.h 51 #define rb_entry(ptr, type, member) container_of(ptr, type, member) macro
100 ____ptr ? rb_entry(____ptr, type, member) : NULL; \
elevator.h 204 #define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
perf_event.h 546 struct list_head rb_entry; member
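
Note: the rbtree.h hit above (line 51) is the definition itself; rb_entry() is a thin wrapper around container_of() that maps an embedded struct rb_node back to its enclosing structure, and the hit at line 100 is the body of the NULL-tolerant rb_entry_safe(). Most lookup hits in these results follow the same shape. A minimal sketch of that pattern, adapted from the usual rbtree idiom; struct mytype and my_search() are hypothetical illustration names, not code from any file listed here:

    #include <linux/rbtree.h>

    struct mytype {
            struct rb_node node;    /* rb-tree linkage embedded in the payload */
            int key;
    };

    /* Classic descent: compare keys at each node, and use rb_entry() to
     * convert the bare rb_node back into its container to reach the key. */
    static struct mytype *my_search(struct rb_root *root, int key)
    {
            struct rb_node *n = root->rb_node;

            while (n) {
                    struct mytype *data = rb_entry(n, struct mytype, node);

                    if (key < data->key)
                            n = n->rb_left;
                    else if (key > data->key)
                            n = n->rb_right;
                    else
                            return data;
            }
            return NULL;            /* not found */
    }
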
/linux-4.4.14/fs/jffs2/
nodelist.h 334 return rb_entry(node, struct jffs2_node_frag, rb); in frag_first()
344 return rb_entry(node, struct jffs2_node_frag, rb); in frag_last()
347 #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
348 #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
349 #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
350 #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
351 #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
354 #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
355 #define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
356 #define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
[all …]
nodelist.c 131 base = rb_entry(parent, struct jffs2_node_frag, rb); in jffs2_fragtree_insert()
537 frag = rb_entry(next, struct jffs2_node_frag, rb); in jffs2_lookup_node_frag()
readinode.c 185 tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb); in jffs2_lookup_tn()
344 insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); in jffs2_add_tn_to_tree()
432 this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); in ver_insert()
/linux-4.4.14/fs/btrfs/
extent_map.c 102 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
115 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
122 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
125 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
151 entry = rb_entry(n, struct extent_map, rb_node); in __tree_search()
167 prev_entry = rb_entry(prev, struct extent_map, rb_node); in __tree_search()
174 prev_entry = rb_entry(prev, struct extent_map, rb_node); in __tree_search()
177 prev_entry = rb_entry(prev, struct extent_map, rb_node); in __tree_search()
233 merge = rb_entry(rb, struct extent_map, rb_node); in try_merge_map()
252 merge = rb_entry(rb, struct extent_map, rb_node); in try_merge_map()
[all …]
ordered-data.c 50 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); in tree_insert()
87 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); in __tree_search()
105 prev_entry = rb_entry(test, struct btrfs_ordered_extent, in __tree_search()
113 prev_entry = rb_entry(prev, struct btrfs_ordered_extent, in __tree_search()
119 prev_entry = rb_entry(test, struct btrfs_ordered_extent, in __tree_search()
160 entry = rb_entry(tree->last, struct btrfs_ordered_extent, in tree_search()
322 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); in btrfs_dec_test_first_ordered_pending()
397 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); in btrfs_dec_test_ordered_pending()
449 ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node); in btrfs_get_logged_extents()
850 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); in btrfs_lookup_ordered_extent()
[all …]
free-space-cache.c 647 e = rb_entry(n, struct btrfs_free_space, offset_index); in merge_space_tree()
933 e = rb_entry(node, struct btrfs_free_space, offset_index); in write_cache_extent_entries()
1444 info = rb_entry(parent, struct btrfs_free_space, offset_index); in tree_insert_offset()
1507 entry = rb_entry(n, struct btrfs_free_space, offset_index); in tree_search_offset()
1531 entry = rb_entry(n, struct btrfs_free_space, offset_index); in tree_search_offset()
1545 prev = rb_entry(n, struct btrfs_free_space, in tree_search_offset()
1563 entry = rb_entry(n, struct btrfs_free_space, in tree_search_offset()
1577 prev = rb_entry(n, struct btrfs_free_space, in tree_search_offset()
1604 entry = rb_entry(n, struct btrfs_free_space, offset_index); in tree_search_offset()
1803 entry = rb_entry(node, struct btrfs_free_space, offset_index); in find_free_space()
[all …]
ulist.c 124 u = rb_entry(n, struct ulist_node, rb_node); in ulist_rbtree_search()
152 cur = rb_entry(parent, struct ulist_node, rb_node); in ulist_rbtree_insert()
delayed-ref.c 98 ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node); in htree_insert()
102 entry = rb_entry(parent_node, struct btrfs_delayed_ref_head, in htree_insert()
134 entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node); in find_ref_head()
148 entry = rb_entry(n, struct btrfs_delayed_ref_head, in find_ref_head()
373 head = rb_entry(node, struct btrfs_delayed_ref_head, in btrfs_select_ref_head()
relocation.c 298 entry = rb_entry(parent, struct tree_entry, rb_node); in tree_insert()
319 entry = rb_entry(n, struct tree_entry, rb_node); in tree_search()
335 struct backref_node *bnode = rb_entry(rb_node, struct backref_node, in backref_tree_panic()
562 node = rb_entry(rb_node, struct mapping_node, rb_node); in find_reloc_root()
870 upper = rb_entry(rb_node, struct backref_node,
987 upper = rb_entry(rb_node, struct backref_node,
1204 node = rb_entry(rb_node, struct backref_node, rb_node);
1215 node = rb_entry(rb_node, struct backref_node,
1318 node = rb_entry(rb_node, struct mapping_node, rb_node);
1347 node = rb_entry(rb_node, struct mapping_node, rb_node);
[all …]
delayed-inode.c 352 delayed_item = rb_entry(node, struct btrfs_delayed_item, in __btrfs_lookup_delayed_item()
370 *prev = rb_entry(node, struct btrfs_delayed_item, in __btrfs_lookup_delayed_item()
382 *next = rb_entry(node, struct btrfs_delayed_item, in __btrfs_lookup_delayed_item()
422 item = rb_entry(parent_node, struct btrfs_delayed_item, in __btrfs_add_delayed_item()
515 item = rb_entry(p, struct btrfs_delayed_item, rb_node); in __btrfs_first_delayed_insertion_item()
528 item = rb_entry(p, struct btrfs_delayed_item, rb_node); in __btrfs_first_delayed_deletion_item()
541 next = rb_entry(p, struct btrfs_delayed_item, rb_node); in __btrfs_next_delayed_item()
extent_io.c 276 entry = rb_entry(parent, struct tree_entry, rb_node); in tree_insert()
307 entry = rb_entry(prev, struct tree_entry, rb_node); in __etree_search()
327 prev_entry = rb_entry(prev, struct tree_entry, rb_node); in __etree_search()
334 prev_entry = rb_entry(prev, struct tree_entry, rb_node); in __etree_search()
337 prev_entry = rb_entry(prev, struct tree_entry, rb_node); in __etree_search()
393 other = rb_entry(other_node, struct extent_state, rb_node); in merge_state()
405 other = rb_entry(other_node, struct extent_state, rb_node); in merge_state()
464 found = rb_entry(node, struct extent_state, rb_node); in insert_state()
520 return rb_entry(next, struct extent_state, rb_node); in next_state()
656 state = rb_entry(node, struct extent_state, rb_node); in __clear_extent_bit()
[all …]
inode-map.c 267 info = rb_entry(n, struct btrfs_free_space, offset_index); in btrfs_unpin_free_ino()
304 info = rb_entry(n, struct btrfs_free_space, offset_index); in recalculate_thresholds()
qgroup.c 150 qgroup = rb_entry(n, struct btrfs_qgroup, node); in find_qgroup_rb()
171 qgroup = rb_entry(parent, struct btrfs_qgroup, node); in add_qgroup_rb()
496 qgroup = rb_entry(n, struct btrfs_qgroup, node); in btrfs_free_qgroup_config()
1443 record = rb_entry(node, struct btrfs_qgroup_extent_record, in btrfs_qgroup_prepare_account_extents()
1469 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, in btrfs_qgroup_insert_dirty_extent()
1752 record = rb_entry(node, struct btrfs_qgroup_extent_record, in btrfs_qgroup_account_extents()
2421 qgroup = rb_entry(n, struct btrfs_qgroup, node); in qgroup_rescan_zero_tracking()
send.c 2829 entry = rb_entry(parent, struct orphan_dir_info, node); in add_orphan_dir_info()
2852 entry = rb_entry(n, struct orphan_dir_info, node); in get_orphan_dir_info()
2985 entry = rb_entry(parent, struct waiting_dir_move, node); in add_waiting_dir_move()
3008 entry = rb_entry(n, struct waiting_dir_move, node); in get_waiting_dir_move()
3056 entry = rb_entry(parent, struct pending_dir_move, node); in add_pending_dir_move()
3104 entry = rb_entry(n, struct pending_dir_move, node); in get_pending_dir_moves()
6162 pm = rb_entry(n, struct pending_dir_move, node); in btrfs_ioctl_send()
6179 dm = rb_entry(n, struct waiting_dir_move, node); in btrfs_ioctl_send()
6190 odi = rb_entry(n, struct orphan_dir_info, node); in btrfs_ioctl_send()
file.c 107 entry = rb_entry(parent, struct inode_defrag, rb_node); in __btrfs_add_inode_defrag()
239 entry = rb_entry(parent, struct inode_defrag, rb_node); in btrfs_pick_defrag_inode()
253 entry = rb_entry(parent, struct inode_defrag, rb_node); in btrfs_pick_defrag_inode()
273 defrag = rb_entry(node, struct inode_defrag, rb_node); in btrfs_cleanup_defrag_inodes()
extent-tree.c 158 cache = rb_entry(parent, struct btrfs_block_group_cache, in btrfs_add_block_group_cache()
198 cache = rb_entry(n, struct btrfs_block_group_cache, in block_group_cache_tree_search()
2696 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); in find_middle()
2701 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); in find_middle()
2707 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); in find_middle()
2944 head = rb_entry(node, struct btrfs_delayed_ref_head, in btrfs_run_delayed_refs()
3306 cache = rb_entry(node, struct btrfs_block_group_cache, in next_block_group()
9534 block_group = rb_entry(n, struct btrfs_block_group_cache, in btrfs_free_block_groups()
inode.c 2227 entry = rb_entry(parent, struct sa_defrag_extent_backref, node); in backref_insert()
2660 backref = rb_entry(node, struct sa_defrag_extent_backref, node); in relink_file_extents()
5068 em = rb_entry(node, struct extent_map, rb_node); in evict_inode_truncate_pages()
5105 state = rb_entry(node, struct extent_state, rb_node); in evict_inode_truncate_pages()
5425 entry = rb_entry(parent, struct btrfs_inode, rb_node); in inode_tree_add()
5485 entry = rb_entry(node, struct btrfs_inode, rb_node); in btrfs_invalidate_inodes()
5496 entry = rb_entry(prev, struct btrfs_inode, rb_node); in btrfs_invalidate_inodes()
5505 entry = rb_entry(node, struct btrfs_inode, rb_node); in btrfs_invalidate_inodes()
transaction.c 96 state = rb_entry(node, struct extent_state, rb_node); in clear_btree_io_tree()
disk-io.c 4199 head = rb_entry(node, struct btrfs_delayed_ref_head, in btrfs_destroy_delayed_refs()
volumes.c 1527 em = rb_entry(n, struct extent_map, rb_node); in find_next_chunk()
/linux-4.4.14/drivers/block/drbd/
drbd_interval.c 11 struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb); in interval_end()
56 rb_entry(*new, struct drbd_interval, rb); in drbd_insert_interval()
97 rb_entry(node, struct drbd_interval, rb); in drbd_contains_interval()
144 rb_entry(node, struct drbd_interval, rb); in drbd_find_overlap()
173 i = rb_entry(node, struct drbd_interval, rb); in drbd_next_overlap()
/linux-4.4.14/fs/ext4/
block_validity.c 66 entry = rb_entry(parent, struct ext4_system_zone, node); in add_system_zone()
77 new_entry = rb_entry(new_node, struct ext4_system_zone, in add_system_zone()
99 entry = rb_entry(node, struct ext4_system_zone, node); in add_system_zone()
111 entry = rb_entry(node, struct ext4_system_zone, node); in add_system_zone()
130 entry = rb_entry(node, struct ext4_system_zone, node); in debug_print_tree()
209 entry = rb_entry(n, struct ext4_system_zone, node); in ext4_data_block_valid()
extents_status.c 185 es = rb_entry(node, struct extent_status, rb_node); in ext4_es_print_tree()
214 es = rb_entry(node, struct extent_status, rb_node); in __es_tree_search()
228 return node ? rb_entry(node, struct extent_status, rb_node) : in __es_tree_search()
276 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_find_delayed_extent_range()
423 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_try_to_merge_left()
447 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_try_to_merge_right()
641 es = rb_entry(parent, struct extent_status, rb_node); in __es_insert_extent()
808 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_lookup_extent()
907 es = rb_entry(node, struct extent_status, rb_node); in __es_remove_extent()
920 es = rb_entry(node, struct extent_status, rb_node); in __es_remove_extent()
[all …]
dir.c 446 fname = rb_entry(parent, struct fname, rb_hash); in ext4_htree_store_dirent()
569 fname = rb_entry(info->curr_node, struct fname, rb_hash); in ext4_dx_readdir()
577 fname = rb_entry(info->curr_node, struct fname, in ext4_dx_readdir()
mballoc.c 3458 entry = rb_entry(n, struct ext4_free_data, efd_node); in ext4_mb_generate_from_freelist()
4604 entry = rb_entry(parent, struct ext4_free_data, efd_node); in ext4_mb_free_metadata()
4624 entry = rb_entry(node, struct ext4_free_data, efd_node); in ext4_mb_free_metadata()
4636 entry = rb_entry(node, struct ext4_free_data, efd_node); in ext4_mb_free_metadata()
/linux-4.4.14/security/keys/
proc.c 91 struct key *key = rb_entry(n, struct key, serial_node); in key_serial_next()
111 struct key *key = rb_entry(n, struct key, serial_node); in find_ge_key()
134 minkey = rb_entry(n, struct key, serial_node); in find_ge_key()
157 struct key *key = rb_entry(n, struct key, serial_node); in key_node_serial()
180 struct key *key = rb_entry(_p, struct key, serial_node); in proc_keys_show()
275 struct key_user *user = rb_entry(n, struct key_user, node); in __key_user_next()
335 struct key_user *user = rb_entry(_p, struct key_user, node); in proc_key_users_show()
key.c 67 user = rb_entry(parent, struct key_user, node); in key_user_lookup()
158 xkey = rb_entry(parent, struct key, serial_node); in key_alloc_serial()
189 xkey = rb_entry(parent, struct key, serial_node); in key_alloc_serial()
624 key = rb_entry(n, struct key, serial_node); in key_lookup()
gc.c 220 key = rb_entry(cursor, struct key, serial_node); in key_garbage_collector()
/linux-4.4.14/drivers/gpu/drm/
drm_vma_manager.c 153 node = rb_entry(iter, struct drm_vma_offset_node, vm_rb); in drm_vma_offset_lookup_locked()
186 iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb); in _drm_vma_offset_add_rb()
313 entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_allow()
365 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_revoke()
406 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_is_allowed()
/linux-4.4.14/lib/
rbtree_test.c 31 if (key < rb_entry(parent, struct test_node, rb)->key) in insert()
50 child_augmented = rb_entry(node->rb.rb_left, struct test_node, in augment_recompute()
56 child_augmented = rb_entry(node->rb.rb_right, struct test_node, in augment_recompute()
76 parent = rb_entry(rb_parent, struct test_node, rb); in RB_DECLARE_CALLBACKS()
144 struct test_node *node = rb_entry(rb, struct test_node, rb); in check()
170 struct test_node *node = rb_entry(rb, struct test_node, rb); in check_augmented()
timerqueue.c 50 ptr = rb_entry(parent, struct timerqueue_node, node); in timerqueue_add()
84 rb_entry(rbn, struct timerqueue_node, node) : NULL; in timerqueue_del()
/linux-4.4.14/tools/include/linux/
rbtree_augmented.h 72 rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
83 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
84 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
90 rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
91 rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
rbtree.h 50 #define rb_entry(ptr, type, member) container_of(ptr, type, member) macro
90 ____ptr ? rb_entry(____ptr, type, member) : NULL; \
/linux-4.4.14/net/netfilter/
nft_rbtree.c 45 rbe = rb_entry(parent, struct nft_rbtree_elem, node); in nft_rbtree_lookup()
92 rbe = rb_entry(parent, struct nft_rbtree_elem, node); in __nft_rbtree_insert()
153 rbe = rb_entry(parent, struct nft_rbtree_elem, node); in nft_rbtree_deactivate()
185 rbe = rb_entry(node, struct nft_rbtree_elem, node); in nft_rbtree_walk()
228 rbe = rb_entry(node, struct nft_rbtree_elem, node); in nft_rbtree_destroy()
/linux-4.4.14/arch/powerpc/kernel/
eeh_cache.c 69 piar = rb_entry(n, struct pci_io_addr_range, rb_node); in __eeh_addr_cache_get_device()
116 piar = rb_entry(n, struct pci_io_addr_range, rb_node); in eeh_addr_cache_print()
138 piar = rb_entry(parent, struct pci_io_addr_range, rb_node); in eeh_addr_cache_insert()
242 piar = rb_entry(n, struct pci_io_addr_range, rb_node); in __eeh_addr_cache_rmv_dev()
/linux-4.4.14/net/ceph/
debugfs.c 70 rb_entry(n, struct ceph_pg_pool_info, node); in osdmap_show()
89 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
100 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
127 req = rb_entry(rp, struct ceph_mon_generic_request, node); in monc_show()
153 req = rb_entry(p, struct ceph_osd_request, r_node); in osdc_show()
osdmap.c 402 pg = rb_entry(parent, struct ceph_pg_mapping, node); in __insert_pg_mapping()
425 pg = rb_entry(n, struct ceph_pg_mapping, node); in __lookup_pg_mapping()
466 pi = rb_entry(parent, struct ceph_pg_pool_info, node); in __insert_pg_pool()
486 pi = rb_entry(n, struct ceph_pg_pool_info, node); in __lookup_pg_pool()
524 rb_entry(rbp, struct ceph_pg_pool_info, node); in ceph_pg_poolid_by_name()
661 rb_entry(rb_first(&map->pg_temp), in ceph_osdmap_destroy()
668 rb_entry(rb_first(&map->primary_temp), in ceph_osdmap_destroy()
675 rb_entry(rb_first(&map->pg_pools), in ceph_osdmap_destroy()
osd_client.c 844 req = rb_entry(parent, struct ceph_osd_request, r_node); in __insert_request()
864 req = rb_entry(n, struct ceph_osd_request, r_node); in __lookup_request()
883 req = rb_entry(n, struct ceph_osd_request, r_node); in __lookup_request_ge()
1093 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), in remove_all_osds()
1185 osd = rb_entry(parent, struct ceph_osd, o_node); in __insert_osd()
1204 osd = rb_entry(n, struct ceph_osd, o_node); in __lookup_osd()
1953 struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); in reset_changed_osds()
1984 req = rb_entry(p, struct ceph_osd_request, r_node); in kick_requests()
2239 event = rb_entry(parent, struct ceph_osd_event, node); in __insert_event()
2261 event = rb_entry(parent, struct ceph_osd_event, node); in __find_event()
mon_client.c 438 req = rb_entry(n, struct ceph_mon_generic_request, node); in __lookup_generic_req()
458 req = rb_entry(parent, struct ceph_mon_generic_request, node); in __insert_generic_request()
732 req = rb_entry(p, struct ceph_mon_generic_request, node); in __resend_generic_request()
auth_x.c 106 th = rb_entry(parent, struct ceph_x_ticket_handler, node); in get_ticket_handler()
660 rb_entry(p, struct ceph_x_ticket_handler, node); in ceph_x_destroy()
/linux-4.4.14/Documentation/
rbtree.txt 68 individual members may be accessed directly via rb_entry(node, type, member).
185 rb_entry(node, type, member).
191 printk("key=%s\n", rb_entry(node, struct mytype, node)->keystring);
272 node = rb_entry(root->rb_node, struct interval_tree_node, rb);
277 rb_entry(node->rb.rb_left,
296 node = rb_entry(node->rb.rb_right,
313 subtree_last = rb_entry(node->rb.rb_left,
319 subtree_last = rb_entry(node->rb.rb_right,
331 rb_entry(rb, struct interval_tree_node, rb);
343 rb_entry(rb_old, struct interval_tree_node, rb);
[all …]
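
The Documentation/rbtree.txt hits above quote the canonical traversal idiom; written out, in-order iteration combines rb_first()/rb_next() with rb_entry(), as in this short sketch (mytree, struct mytype, and the keystring member are the documentation's illustrative names, not a real kernel structure):

    struct rb_node *node;

    /* Walk the tree in sorted order, converting each node to its container. */
    for (node = rb_first(&mytree); node; node = rb_next(node))
            printk("key=%s\n",
                   rb_entry(node, struct mytype, node)->keystring);
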
/linux-4.4.14/fs/ocfs2/
reservations.c 98 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_dump_resv()
153 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_check_resmap()
288 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_resmap_clear_all_resv()
329 tmp = rb_entry(parent, struct ocfs2_alloc_reservation, r_node); in ocfs2_resv_insert()
380 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_find_resv_lhs()
538 next_resv = rb_entry(next, struct ocfs2_alloc_reservation, in __ocfs2_resv_find_window()
575 next_resv = rb_entry(next, in __ocfs2_resv_find_window()
618 prev_resv = rb_entry(prev, struct ocfs2_alloc_reservation, in __ocfs2_resv_find_window()
uptodate.c 152 item = rb_entry(node, struct ocfs2_meta_cache_item, c_node); in ocfs2_purge_copied_metadata_tree()
230 item = rb_entry(n, struct ocfs2_meta_cache_item, c_node); in ocfs2_search_cache_tree()
334 tmp = rb_entry(parent, struct ocfs2_meta_cache_item, c_node); in __ocfs2_insert_cache_tree()
refcounttree.c 207 tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node); in ocfs2_find_refcount_tree()
232 tmp = rb_entry(parent, struct ocfs2_refcount_tree, in ocfs2_insert_refcount_tree()
543 tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node); in ocfs2_purge_refcount_trees()
/linux-4.4.14/tools/perf/util/
symbol.c 157 curr = rb_entry(nd, struct symbol, rb_node); in symbols__fixup_duplicate()
160 next = rb_entry(nd, struct symbol, rb_node); in symbols__fixup_duplicate()
188 curr = rb_entry(prevnd, struct symbol, rb_node); in symbols__fixup_end()
192 curr = rb_entry(nd, struct symbol, rb_node); in symbols__fixup_end()
297 pos = rb_entry(next, struct symbol, rb_node); in symbols__delete()
313 s = rb_entry(parent, struct symbol, rb_node); in symbols__insert()
333 struct symbol *s = rb_entry(n, struct symbol, rb_node); in symbols__find()
351 return rb_entry(n, struct symbol, rb_node); in symbols__first()
361 return rb_entry(n, struct symbol, rb_node); in symbols__next()
381 s = rb_entry(parent, struct symbol_name_rb_node, rb_node); in symbols__insert_by_name()
[all …]
strlist.h 53 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; in strlist__first()
61 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; in strlist__next()
intlist.h 48 return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; in intlist__first()
56 return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; in intlist__next()
map.c 264 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); in map__fixup_start()
274 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); in map__fixup_end()
482 struct map *pos = rb_entry(next, struct map, rb_node); in __maps__purge()
565 struct map *pos = rb_entry(nd, struct map, rb_node); in maps__find_symbol_by_name()
618 struct map *pos = rb_entry(nd, struct map, rb_node); in maps__fprintf()
665 struct map *pos = rb_entry(next, struct map, rb_node); in maps__fixup_overlappings()
762 m = rb_entry(parent, struct map, rb_node); in __maps__insert()
804 m = rb_entry(parent, struct map, rb_node); in maps__find()
824 return rb_entry(first, struct map, rb_node); in maps__first()
833 return rb_entry(next, struct map, rb_node); in map__next()
hist.c 191 n = rb_entry(next, struct hist_entry, rb_node); in hists__output_recalc_col_len()
287 n = rb_entry(next, struct hist_entry, rb_node); in hists__decay_entries()
303 n = rb_entry(next, struct hist_entry, rb_node); in hists__delete_entries()
400 he = rb_entry(parent, struct hist_entry, rb_node_in); in hists__findnew_entry()
996 iter = rb_entry(parent, struct hist_entry, rb_node_in); in hists__collapse_insert_entry()
1068 n = rb_entry(next, struct hist_entry, rb_node_in); in hists__collapse_resort()
1146 iter = rb_entry(parent, struct hist_entry, rb_node); in __hists__insert_output_entry()
1186 n = rb_entry(next, struct hist_entry, rb_node_in); in hists__output_resort()
1241 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); in hists__filter_by_dso()
1275 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); in hists__filter_by_thread()
[all …]
callchain.c 216 rnode = rb_entry(parent, struct callchain_node, rb_node); in rb_insert_callchain()
252 child = rb_entry(n, struct callchain_node, rb_node_in); in __sort_chain_flat()
283 child = rb_entry(n, struct callchain_node, rb_node_in); in __sort_chain_graph_abs()
313 child = rb_entry(n, struct callchain_node, rb_node_in); in __sort_chain_graph_rel()
376 child = rb_entry(n, struct callchain_node, rb_node_in); in create_child()
500 first = rb_entry(p, struct callchain_node, rb_node_in); in split_add_child()
540 rnode = rb_entry(parent, struct callchain_node, rb_node_in); in append_chain_children()
machine.c 115 struct thread *t = rb_entry(nd, struct thread, rb_node); in machine__delete_threads()
171 pos = rb_entry(parent, struct machine, rb_node); in machines__add()
193 struct machine *machine = rb_entry(nd, struct machine, rb_node); in machines__set_symbol_filter()
206 struct machine *machine = rb_entry(nd, struct machine, rb_node); in machines__set_comm_exec()
224 machine = rb_entry(parent, struct machine, rb_node); in machines__find()
278 struct machine *pos = rb_entry(nd, struct machine, rb_node); in machines__process_guests()
305 machine = rb_entry(node, struct machine, rb_node); in machines__set_id_hdr_size()
383 th = rb_entry(parent, struct thread, rb_node); in ____machine__findnew_thread()
599 struct machine *pos = rb_entry(nd, struct machine, rb_node); in machines__fprintf_dsos()
619 struct machine *pos = rb_entry(nd, struct machine, rb_node); in machines__fprintf_dsos_buildid()
[all …]
build-id.c 248 struct machine *pos = rb_entry(nd, struct machine, rb_node); in perf_session__write_buildid_table()
281 struct machine *pos = rb_entry(nd, struct machine, rb_node); in dsos__hit_all()
525 struct machine *pos = rb_entry(nd, struct machine, rb_node); in perf_session__cache_build_ids()
542 struct machine *pos = rb_entry(nd, struct machine, rb_node); in perf_session__read_build_ids()
comm.c 60 iter = rb_entry(parent, struct comm_str, rb_node); in comm_str__findnew()
dso.c 564 cache = rb_entry(next, struct dso_cache, rb_node); in dso_cache__free()
583 cache = rb_entry(parent, struct dso_cache, rb_node); in dso_cache__find()
611 cache = rb_entry(parent, struct dso_cache, rb_node); in dso_cache__insert()
904 struct dso *this = rb_entry(*p, struct dso, rb_node); in __dso__findlink_by_longname()
1315 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); in dso__fprintf()
symbol.h 72 nd && (pos = rb_entry(nd, struct symbol, rb_node)); \
annotate.c 1260 iter = rb_entry(parent, struct source_line, node); in insert_source_line()
1303 iter = rb_entry(parent, struct source_line, node); in __resort_source_line()
1324 src_line = rb_entry(node, struct source_line, node); in resort_source_line()
1434 src_line = rb_entry(node, struct source_line, node); in print_summary()
thread-stack.c 419 cp = rb_entry(node_parent, struct call_path, rb_node); in call_path__findnew()
/linux-4.4.14/fs/f2fs/
extent_cache.c 100 en = rb_entry(node, struct extent_node, rb_node); in __lookup_extent_tree()
139 en = rb_entry(node, struct extent_node, rb_node); in __free_extent_tree()
285 en = rb_entry(*pnode, struct extent_node, rb_node); in __lookup_extent_tree_ret()
298 en = rb_entry(parent, struct extent_node, rb_node); in __lookup_extent_tree_ret()
303 rb_entry(tmp_node, struct extent_node, rb_node) : NULL; in __lookup_extent_tree_ret()
309 rb_entry(tmp_node, struct extent_node, rb_node) : NULL; in __lookup_extent_tree_ret()
317 rb_entry(tmp_node, struct extent_node, rb_node) : NULL; in __lookup_extent_tree_ret()
323 rb_entry(tmp_node, struct extent_node, rb_node) : NULL; in __lookup_extent_tree_ret()
377 en = rb_entry(parent, struct extent_node, rb_node); in __insert_extent_tree()
473 rb_entry(node, struct extent_node, rb_node) in f2fs_update_extent_tree_range()
/linux-4.4.14/kernel/locking/
rtmutex_common.h 51 w = rb_entry(lock->waiters_leftmost, struct rt_mutex_waiter, in rt_mutex_top_waiter()
66 return rb_entry(p->pi_waiters_leftmost, struct rt_mutex_waiter, in task_top_pi_waiter()
rtmutex.c 189 entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry); in rt_mutex_enqueue()
228 entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry); in rt_mutex_enqueue_pi()
/linux-4.4.14/arch/arm/xen/
p2m.c 40 entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); in xen_add_phys_to_mach_entry()
70 entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); in __pfn_to_mfn()
130 p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); in __set_phys_to_machine_multi()
/linux-4.4.14/mm/
interval_tree.c 43 parent = rb_entry(prev->shared.rb.rb_right, in vma_interval_tree_insert_after()
48 parent = rb_entry(parent->shared.rb.rb_left, in vma_interval_tree_insert_after()
mmap.c 363 subtree_gap = rb_entry(vma->vm_rb.rb_left, in vma_compute_subtree_gap()
369 subtree_gap = rb_entry(vma->vm_rb.rb_right, in vma_compute_subtree_gap()
386 vma = rb_entry(nd, struct vm_area_struct, vm_rb); in browse_rb()
429 vma = rb_entry(nd, struct vm_area_struct, vm_rb); in validate_mm_rb()
567 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb); in find_vma_links()
582 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); in find_vma_links()
1738 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area()
1747 rb_entry(vma->vm_rb.rb_left, in unmapped_area()
1766 rb_entry(vma->vm_rb.rb_right, in unmapped_area()
1779 vma = rb_entry(rb_parent(prev), in unmapped_area()
[all …]
nommu.c 580 last = rb_entry(lastp, struct vm_region, vm_rb); in validate_nommu_regions()
585 region = rb_entry(p, struct vm_region, vm_rb); in validate_nommu_regions()
586 last = rb_entry(lastp, struct vm_region, vm_rb); in validate_nommu_regions()
615 pregion = rb_entry(parent, struct vm_region, vm_rb); in add_nommu_region()
746 pvma = rb_entry(parent, struct vm_area_struct, vm_rb); in add_vma_to_mm()
775 prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb); in add_vma_to_mm()
1308 pregion = rb_entry(rb, struct vm_region, vm_rb); in do_mmap()
vmalloc.c 301 va = rb_entry(n, struct vmap_area, rb_node); in __find_vmap_area()
323 tmp_va = rb_entry(parent, struct vmap_area, rb_node); in __insert_vmap_area()
339 prev = rb_entry(tmp, struct vmap_area, rb_node); in __insert_vmap_area()
402 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node); in alloc_vmap_area()
419 tmp = rb_entry(n, struct vmap_area, rb_node); in alloc_vmap_area()
488 cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node); in __free_vmap_area()
2278 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL; in node_to_va()
2301 va = rb_entry(n, struct vmap_area, rb_node); in pvm_find_next_prev()
ksm.c 750 stable_node = rb_entry(root_stable_tree[nid].rb_node, in remove_all_stable_nodes()
1175 stable_node = rb_entry(*new, struct stable_node, node); in stable_tree_search()
1279 stable_node = rb_entry(*new, struct stable_node, node); in stable_tree_insert()
1360 tree_rmap_item = rb_entry(*new, struct rmap_item, node); in unstable_tree_search_insert()
2023 stable_node = rb_entry(node, struct stable_node, node); in ksm_check_stable_tree()
mempolicy.c 2157 struct sp_node *p = rb_entry(n, struct sp_node, nd); in sp_lookup()
2173 w = rb_entry(prev, struct sp_node, nd); in sp_lookup()
2178 return rb_entry(n, struct sp_node, nd); in sp_lookup()
2191 nd = rb_entry(parent, struct sp_node, nd); in sp_insert()
2392 n = rb_entry(next, struct sp_node, nd); in shared_policy_replace()
2503 n = rb_entry(next, struct sp_node, nd); in mpol_free_shared_policy()
util.c 191 next = rb_entry(rb_parent, in __vma_link_list()
zswap.c 257 entry = rb_entry(node, struct zswap_entry, rbnode); in zswap_rb_search()
280 myentry = rb_entry(parent, struct zswap_entry, rbnode); in zswap_rb_insert()
kmemleak.c 409 rb_entry(rb, struct kmemleak_object, rb_node); in lookup_object()
594 parent = rb_entry(rb_parent, struct kmemleak_object, rb_node); in create_object()
backing-dev.c 696 rb_entry(rbn, struct bdi_writeback_congested, rb_node); in cgwb_bdi_destroy()
memcontrol.c 514 mz_node = rb_entry(parent, struct mem_cgroup_per_zone, in __mem_cgroup_insert_exceeded()
623 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node); in __mem_cgroup_largest_soft_limit_node()
/linux-4.4.14/fs/proc/
task_nommu.c 27 vma = rb_entry(p, struct vm_area_struct, vm_rb); in task_mem()
88 vma = rb_entry(p, struct vm_area_struct, vm_rb); in task_vsize()
106 vma = rb_entry(p, struct vm_area_struct, vm_rb); in task_statm()
212 return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb), in show_map()
nommu.c 82 return nommu_region_show(m, rb_entry(p, struct vm_region, vm_rb)); in nommu_region_list_show()
proc_sysctl.c 115 ctl_node = rb_entry(node, struct ctl_node, node); in find_entry()
149 parent_node = rb_entry(parent, struct ctl_node, node); in insert_entry()
352 ctl_node = rb_entry(node, struct ctl_node, node); in first_usable_entry()
/linux-4.4.14/fs/xfs/
xfs_extent_busy.c 74 busyp = rb_entry(parent, struct xfs_extent_busy, rb_node); in xfs_extent_busy_insert()
123 busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node); in xfs_extent_busy_search()
323 rb_entry(rbp, struct xfs_extent_busy, rb_node); in xfs_extent_busy_reuse()
370 rb_entry(rbp, struct xfs_extent_busy, rb_node); in xfs_extent_busy_trim()
xfs_buf.c 487 bp = rb_entry(parent, struct xfs_buf, b_rbnode); in _xfs_buf_find()
/linux-4.4.14/tools/perf/ui/browsers/
hists.c 63 rb_entry(nd, struct hist_entry, rb_node); in hist_browser__get_folding()
162 struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); in callchain_node__count_rows_rb_tree()
204 struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); in callchain__count_rows()
240 struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); in callchain_node__init_have_children_rb_tree()
280 struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); in callchain__init_have_children()
335 struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); in callchain_node__set_folding_rb_tree()
376 struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); in callchain__set_folding()
404 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); in __hist_browser__set_folding()
472 struct hist_entry *h = rb_entry(browser->b.top, in hist_browser__run()
596 struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); in hist_browser__show_callchain()
[all …]
map.c 22 struct symbol *sym = rb_entry(nd, struct symbol, rb_node); in map_browser__write()
116 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); in map__browse()
annotate.c 328 l = rb_entry(parent, struct browser_disasm_line, rb_node); in disasm_rb_tree__insert()
369 bpos = rb_entry(nd, struct browser_disasm_line, rb_node); in annotate_browser__set_rb_top()
/linux-4.4.14/drivers/staging/rdma/ipath/
ipath_verbs_mcast.c 131 mcast = rb_entry(n, struct ipath_mcast, rb_node); in ipath_mcast_find()
177 tmcast = rb_entry(pn, struct ipath_mcast, rb_node); in ipath_mcast_add()
305 mcast = rb_entry(n, struct ipath_mcast, rb_node); in ipath_multicast_detach()
/linux-4.4.14/fs/logfs/
gc.c 193 cur = rb_entry(parent, struct gc_candidate, rb_node); in add_list()
212 cand = rb_entry(rb_last(&list->rb_tree), struct gc_candidate, rb_node); in add_list()
241 cand = rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node); in get_best_cand()
352 return rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node); in first_in_list()
597 cand = rb_entry(rb_first(&super->s_free_list.rb_tree), in logfs_journal_wl_pass()
705 cand = rb_entry(list->rb_tree.rb_node, struct gc_candidate, in logfs_cleanup_list()
/linux-4.4.14/drivers/char/
mmtimer.c 273 x = rb_entry(parent, struct mmtimer, list); in mmtimer_add_list()
288 if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next, in mmtimer_add_list()
309 x = rb_entry(n->next, struct mmtimer, list); in mmtimer_set_next_timer()
533 base = rb_entry(timers[indx].next, struct mmtimer, list); in mmtimer_interrupt()
567 x = rb_entry(mn->next, struct mmtimer, list); in mmtimer_tasklet()
626 t = rb_entry(n, struct mmtimer, list); in sgi_timer_del()
/linux-4.4.14/tools/perf/
builtin-kmem.c 77 data = rb_entry(*node, struct alloc_stat, node); in insert_alloc_stat()
120 data = rb_entry(*node, struct alloc_stat, node); in insert_caller_stat()
203 data = rb_entry(node, struct alloc_stat, node); in search_alloc_stat()
431 data = rb_entry(*node, struct page_stat, node); in __page_stat__findnew_page()
481 data = rb_entry(*node, struct page_stat, node); in __page_stat__findnew_alloc()
536 data = rb_entry(*node, struct page_stat, node); in __page_stat__findnew_caller()
962 struct alloc_stat *data = rb_entry(next, struct alloc_stat, in __print_slab_result()
1035 data = rb_entry(next, struct page_stat, node); in __print_page_alloc_result()
1078 data = rb_entry(next, struct page_stat, node); in __print_page_caller_result()
1219 this = rb_entry(*new, struct alloc_stat, node); in sort_slab_insert()
[all …]
builtin-annotate.c 129 struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); in hists__find_annotations()
builtin-diff.c 439 struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in); in hists__baseline_only()
465 he = rb_entry(next, struct hist_entry, rb_node_in); in hists__precompute()
builtin-sched.c 1320 data = rb_entry(node, struct work_atoms, node); in perf_sched__sort_lat()
1634 data = rb_entry(node, struct work_atoms, node); in perf_sched__merge_lat()
1660 work_list = rb_entry(next, struct work_atoms, node); in perf_sched__lat()
builtin-top.c 360 n = rb_entry(next, struct hist_entry, rb_node); in perf_top__prompt_symbol()
/linux-4.4.14/net/rxrpc/
ar-connection.c 92 bundle = rb_entry(p, struct rxrpc_conn_bundle, node); in rxrpc_get_bundle()
121 bundle = rb_entry(parent, struct rxrpc_conn_bundle, node); in rxrpc_get_bundle()
252 xconn = rb_entry(parent, struct rxrpc_connection, node); in rxrpc_assign_connection_id()
292 xconn = rb_entry(parent, struct rxrpc_connection, node); in rxrpc_assign_connection_id()
316 xcall = rb_entry(parent, struct rxrpc_call, conn_node); in rxrpc_add_call_ID_to_conn()
644 conn = rb_entry(p, struct rxrpc_connection, node); in rxrpc_incoming_connection()
687 conn = rb_entry(p, struct rxrpc_connection, node); in rxrpc_incoming_connection()
774 conn = rb_entry(p, struct rxrpc_connection, node); in rxrpc_find_connection()
ar-call.c 335 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_get_client_call()
367 call = rb_entry(parent, struct rxrpc_call, sock_node); in rxrpc_get_client_call()
490 call = rb_entry(parent, struct rxrpc_call, conn_node); in rxrpc_incoming_call()
588 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_find_server_call()
791 call = rb_entry(p, struct rxrpc_call, sock_node); in rxrpc_release_calls_on_socket()
ar-connevent.c 39 call = rb_entry(p, struct rxrpc_call, conn_node); in rxrpc_abort_calls()
ar-accept.c 342 call = rb_entry(parent, struct rxrpc_call, sock_node); in rxrpc_accept_call()
/linux-4.4.14/drivers/staging/rdma/hfi1/
verbs_mcast.c 139 mcast = rb_entry(n, struct hfi1_mcast, rb_node); in hfi1_mcast_find()
184 tmcast = rb_entry(pn, struct hfi1_mcast, rb_node); in mcast_add()
327 mcast = rb_entry(n, struct hfi1_mcast, rb_node); in hfi1_multicast_detach()
/linux-4.4.14/drivers/infiniband/hw/qib/
qib_verbs_mcast.c 122 mcast = rb_entry(n, struct qib_mcast, rb_node); in qib_mcast_find()
167 tmcast = rb_entry(pn, struct qib_mcast, rb_node); in qib_mcast_add()
307 mcast = rb_entry(n, struct qib_mcast, rb_node); in qib_multicast_detach()
/linux-4.4.14/drivers/mtd/ubi/
wl.c 161 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); in wl_tree_add()
257 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); in in_wl_tree()
317 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); in find_wl_entry()
324 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); in find_wl_entry()
358 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
359 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
362 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
708 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
729 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
979 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
[all …]
attach.c 270 av = rb_entry(parent, struct ubi_ainf_volume, rb); in add_volume()
481 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); in ubi_add_to_av()
612 av = rb_entry(p, struct ubi_ainf_volume, rb); in ubi_find_av()
639 aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb); in ubi_remove_av()
1148 aeb = rb_entry(this, struct ubi_ainf_peb, u.rb); in destroy_av()
1198 av = rb_entry(rb, struct ubi_ainf_volume, rb); in destroy_ai()
fastmap.c 191 av = rb_entry(parent, struct ubi_ainf_volume, rb); in add_vol()
242 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); in assign_aeb_to_av()
281 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); in update_vol()
385 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb); in process_pool_aeb()
425 av = rb_entry(node, struct ubi_ainf_volume, rb); in unmap_peb()
429 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb); in unmap_peb()
fastmap-wl.c 387 e = rb_entry(rb_next(root->rb_node), in may_reserve_for_fm()
eba.c 105 le = rb_entry(p, struct ubi_ltree_entry, rb); in ltree_lookup()
171 le1 = rb_entry(parent, struct ubi_ltree_entry, rb); in ltree_add_entry()
/linux-4.4.14/security/integrity/
iint.c 39 iint = rb_entry(n, struct integrity_iint_cache, rb_node); in __integrity_iint_find()
111 test_iint = rb_entry(parent, struct integrity_iint_cache, in integrity_inode_get()
/linux-4.4.14/tools/perf/ui/stdio/
hist.c 100 child = rb_entry(node, struct callchain_node, rb_node); in __callchain__fprintf_graph()
178 cnode = rb_entry(node, struct callchain_node, rb_node); in callchain__fprintf_graph()
248 chain = rb_entry(rb_node, struct callchain_node, rb_node); in callchain__fprintf_flat()
462 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); in hists__fprintf()
/linux-4.4.14/fs/ubifs/
log.c 50 bud = rb_entry(p, struct ubifs_bud, rb); in ubifs_search_bud()
83 bud = rb_entry(p, struct ubifs_bud, rb); in ubifs_get_wbuf()
134 b = rb_entry(parent, struct ubifs_bud, rb); in ubifs_add_bud()
314 bud = rb_entry(p1, struct ubifs_bud, rb); in remove_buds()
559 dr = rb_entry(parent, struct done_ref, rb); in done_already()
orphan.c 85 o = rb_entry(parent, struct ubifs_orphan, rb); in ubifs_add_orphan()
123 o = rb_entry(p, struct ubifs_orphan, rb); in ubifs_delete_orphan()
525 o = rb_entry(parent, struct ubifs_orphan, rb); in insert_dead_orphan()
755 o = rb_entry(p, struct ubifs_orphan, rb); in dbg_find_orphan()
782 o = rb_entry(parent, struct check_orphan, rb); in dbg_ins_check_orphan()
804 o = rb_entry(p, struct check_orphan, rb); in dbg_find_check_orphan()
recovery.c 1266 e = rb_entry(parent, struct size_entry, rb); in add_ino()
1299 e = rb_entry(p, struct size_entry, rb); in find_ino()
1482 e = rb_entry(this, struct size_entry, rb); in ubifs_recover_size()
debug.c 623 bud = rb_entry(rb, struct ubifs_bud, rb); in ubifs_dump_budg()
711 bud = rb_entry(rb, struct ubifs_bud, rb); in ubifs_dump_lprop()
1807 fscki = rb_entry(parent, struct fsck_inode, rb); in add_inode()
1885 fscki = rb_entry(p, struct fsck_inode, rb); in search_inode()
2154 fscki = rb_entry(this, struct fsck_inode, rb); in check_inodes()
tnc_commit.c 175 o = rb_entry(p, struct ubifs_old_idx, rb); in find_old_idx()
tnc.c 91 o = rb_entry(parent, struct ubifs_old_idx, rb); in insert_old_idx()
/linux-4.4.14/drivers/base/regmap/
regcache-rbtree.c 242 rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); in regcache_rbtree_exit()
418 rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, in regcache_rbtree_write()
468 rbnode = rb_entry(node, struct regcache_rbtree_node, node); in regcache_rbtree_sync()
508 rbnode = rb_entry(node, struct regcache_rbtree_node, node); in regcache_rbtree_drop()
regmap-debugfs.c 622 range_node = rb_entry(next, struct regmap_range_node, node); in regmap_debugfs_init()
regmap.c 425 range_node = rb_entry(next, struct regmap_range_node, node); in regmap_range_exit()
/linux-4.4.14/drivers/infiniband/hw/mlx4/
cm.c 151 rb_entry(node, struct id_map_entry, node); in id_map_find_by_sl_id()
230 ent = rb_entry(parent, struct id_map_entry, node); in sl_id_map_add()
441 rb_entry(rb_first(sl_id_map), in mlx4_ib_cm_paravirt_clean()
453 rb_entry(nd, struct id_map_entry, node); in mlx4_ib_cm_paravirt_clean()
mcg.c 170 group = rb_entry(node, struct mcast_group, node); in mcast_find()
193 cur_group = rb_entry(parent, struct mcast_group, node); in mcast_insert()
1106 group = rb_entry(p, struct mcast_group, node); in _mlx4_ib_mcg_port_cleanup()
1237 group = rb_entry(p, struct mcast_group, node); in clean_vf_mcast()
/linux-4.4.14/fs/afs/
callback.c 48 vnode = rb_entry(server->cb_promises.rb_node, in afs_init_callback_state()
153 vnode = rb_entry(p, struct afs_vnode, server_rb); in afs_break_one_callback()
384 vnode = rb_entry(rb_first(&server->cb_promises),
server.c 48 xserver = rb_entry(p, struct afs_server, master_rb); in afs_install_server()
193 server = rb_entry(p, struct afs_server, master_rb); in afs_find_server()
vnode.c 32 vnode = rb_entry(node, struct afs_vnode, cb_promise);
85 xvnode = rb_entry(parent, struct afs_vnode, server_rb); in afs_install_vnode()
156 xvnode = rb_entry(parent, struct afs_vnode, cb_promise); in afs_vnode_note_promise()
/linux-4.4.14/kernel/power/
wakelock.c 47 wl = rb_entry(node, struct wakelock, node); in pm_show_wakelocks()
160 wl = rb_entry(*node, struct wakelock, node); in wakelock_lookup_add()
swap.c 130 ext = rb_entry(*new, struct swsusp_extent, node); in swsusp_extents_insert()
/linux-4.4.14/drivers/staging/android/ion/
ion.c 159 entry = rb_entry(parent, struct ion_buffer, node); in ion_buffer_add()
408 struct ion_handle *entry = rb_entry(n, struct ion_handle, node); in ion_handle_lookup()
456 entry = rb_entry(parent, struct ion_handle, node); in ion_handle_add()
690 struct ion_handle *handle = rb_entry(n, struct ion_handle, in ion_debug_client_show()
728 struct ion_client *client = rb_entry(node, struct ion_client, in ion_get_client_serial()
793 entry = rb_entry(parent, struct ion_client, node); in ion_client_create()
836 struct ion_handle *handle = rb_entry(n, struct ion_handle, in ion_client_destroy()
1387 struct ion_handle *handle = rb_entry(n, in ion_debug_heap_total()
1409 struct ion_client *client = rb_entry(n, struct ion_client, in ion_debug_heap_show()
1430 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, in ion_debug_heap_show()
/linux-4.4.14/net/core/
gen_estimator.c 159 e = rb_entry(parent, struct gen_estimator, node); in gen_add_node()
179 e = rb_entry(p, struct gen_estimator, node); in gen_find_node()
/linux-4.4.14/drivers/android/
binder.c 485 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_free_buffer()
510 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_allocated_buffer()
535 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_buffer_lookup()
686 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_buf()
706 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); in binder_alloc_buf()
874 node = rb_entry(n, struct binder_node, rb_node); in binder_get_node()
896 node = rb_entry(parent, struct binder_node, rb_node); in binder_new_node()
1012 ref = rb_entry(n, struct binder_ref, rb_node_desc); in binder_get_ref()
1034 ref = rb_entry(parent, struct binder_ref, rb_node_node); in binder_get_ref_for_node()
1055 ref = rb_entry(n, struct binder_ref, rb_node_desc); in binder_get_ref_for_node()
[all …]
/linux-4.4.14/fs/ext2/
balloc.c 213 rsv = rb_entry(n, struct ext2_reserve_window_node, rsv_node); in __rsv_window_dump()
300 rsv = rb_entry(n, struct ext2_reserve_window_node, rsv_node); in search_reserve_window()
317 rsv = rb_entry(n, struct ext2_reserve_window_node, rsv_node); in search_reserve_window()
343 this = rb_entry(parent, struct ext2_reserve_window_node, rsv_node); in ext2_rsv_window_add()
813 rsv = rb_entry(next,struct ext2_reserve_window_node,rsv_node); in find_next_reservable_window()
1055 next_rsv = rb_entry(next, struct ext2_reserve_window_node, rsv_node); in try_to_extend_reservation()
/linux-4.4.14/fs/ceph/
caps.c 316 cap = rb_entry(n, struct ceph_cap, ci_node); in __get_cap_for_mds()
348 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_get_cap_mds()
380 cap = rb_entry(parent, struct ceph_cap, ci_node); in __insert_cap_node()
676 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_caps_issued()
707 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_caps_issued_other()
757 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_caps_issued_mask()
784 cap = rb_entry(q, struct ceph_cap, in __ceph_caps_issued_mask()
808 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_caps_revoking_other()
870 cap = rb_entry(p, struct ceph_cap, ci_node); in __ceph_caps_mds_wanted()
1085 struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node); in ceph_queue_caps_release()
[all …]
xattr.c 353 xattr = rb_entry(parent, struct ceph_inode_xattr, node); in __set_xattr()
446 xattr = rb_entry(parent, struct ceph_inode_xattr, node); in __get_xattr()
522 xattr = rb_entry(p, struct ceph_inode_xattr, node); in __copy_xattr_names()
546 xattr = rb_entry(p, struct ceph_inode_xattr, node); in __ceph_destroy_xattrs()
692 xattr = rb_entry(p, struct ceph_inode_xattr, node); in __ceph_build_xattrs_blob()
mds_client.c 576 req = rb_entry(n, struct ceph_mds_request, r_node); in __lookup_request()
598 req = rb_entry(parent, struct ceph_mds_request, r_node); in __insert_request()
651 rb_entry(p, struct ceph_mds_request, r_node); in __unregister_request()
813 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); in __choose_mds()
1061 req = rb_entry(p, struct ceph_mds_request, r_node); in cleanup_session_requests()
1163 cf = rb_entry(n, struct ceph_cap_flush, i_node); in remove_session_caps_cb()
1511 cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL; in check_caps_flush()
1740 return rb_entry(rb_first(&mdsc->request_tree), in __get_oldest_req()
2255 req = rb_entry(p, struct ceph_mds_request, r_node); in kick_requests()
2761 req = rb_entry(p, struct ceph_mds_request, r_node); in replay_unsafe_requests()
[all …]
inode.c 126 frag = rb_entry(parent, struct ceph_inode_frag, node); in __get_or_create_frag()
165 rb_entry(n, struct ceph_inode_frag, node); in __ceph_find_frag()
322 frag = rb_entry(rb_node, struct ceph_inode_frag, node); in ceph_fill_fragtree()
340 frag = rb_entry(rb_node, struct ceph_inode_frag, node); in ceph_fill_fragtree()
362 frag = rb_entry(rb_node, struct ceph_inode_frag, node); in ceph_fill_fragtree()
510 frag = rb_entry(n, struct ceph_inode_frag, node); in ceph_destroy_inode()
debugfs.c 61 req = rb_entry(rp, struct ceph_mds_request, r_node); in mdsc_show()
snap.c 89 r = rb_entry(parent, struct ceph_snap_realm, node); in __insert_snap_realm()
142 r = rb_entry(n, struct ceph_snap_realm, node); in __lookup_snap_realm()
addr.c 1633 perm = rb_entry(*p, struct ceph_pool_perm, node); in __ceph_pool_perm_get()
1653 perm = rb_entry(parent, struct ceph_pool_perm, node); in __ceph_pool_perm_get()
1812 perm = rb_entry(n, struct ceph_pool_perm, node); in ceph_pool_perm_destroy()
/linux-4.4.14/kernel/sched/
deadline.c 169 entry = rb_entry(parent, struct task_struct, in enqueue_pushable_dl_task()
840 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node); in dec_dl_deadline()
894 entry = rb_entry(parent, struct sched_dl_entity, rb_node); in __enqueue_dl_entity()
1152 return rb_entry(left, struct sched_dl_entity, rb_node); in pick_next_dl_entity()
1287 dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node); in pick_next_earliest_dl_task()
1313 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks); in pick_earliest_pushable_dl_task()
1478 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost, in pick_next_pushable_dl_task()
fair.c 465 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost, in update_min_vruntime()
498 entry = rb_entry(parent, struct sched_entity, run_node); in __enqueue_entity()
541 return rb_entry(left, struct sched_entity, run_node); in __pick_first_entity()
551 return rb_entry(next, struct sched_entity, run_node); in __pick_next_entity()
562 return rb_entry(last, struct sched_entity, run_node); in __pick_last_entity()
/linux-4.4.14/drivers/mtd/
mtdswap.c 92 #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
94 #define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
226 cur = rb_entry(parent, struct swap_eb, rb); in __mtdswap_rb_add()
453 median = rb_entry(medrb, struct swap_eb, rb)->erase_count; in mtdswap_check_counts()
625 eb = rb_entry(rb_first(clean_root), struct swap_eb, rb); in mtdswap_map_free_block()
910 eb = rb_entry(rb_first(rp), struct swap_eb, rb); in mtdswap_pick_gc_eblk()
1238 min[i] = rb_entry(rb_first(root), struct swap_eb, in mtdswap_show()
1240 max[i] = rb_entry(rb_last(root), struct swap_eb, in mtdswap_show()
/linux-4.4.14/net/rds/
cong.c 113 map = rb_entry(parent, struct rds_cong_map, m_rb_node); in rds_cong_tree_walk()
398 map = rb_entry(node, struct rds_cong_map, m_rb_node); in rds_cong_exit()
rdma.c 75 mr = rb_entry(parent, struct rds_mr, r_rb_node); in rds_mr_tree_walk()
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c 116 tfp = rb_entry(parent, struct fw_page, rb_node); in insert_page()
151 tfp = rb_entry(tmp, struct fw_page, rb_node); in find_fw_page()
501 fwp = rb_entry(p, struct fw_page, rb_node); in mlx5_reclaim_startup_pages()
/linux-4.4.14/tools/lib/lockdep/
preload.c 107 l = rb_entry(*node, struct lock_lookup, node); in __get_lock_node()
184 return rb_entry(*node, struct lock_lookup, node); in __get_lock()
/linux-4.4.14/tools/perf/ui/gtk/
hists.c 106 node = rb_entry(nd, struct callchain_node, rb_node); in perf_gtk__add_callchain()
224 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); in perf_gtk__show_hists()
/linux-4.4.14/fs/fscache/
object-list.c 59 xobj = rb_entry(parent, struct fscache_object, objlist_link); in fscache_objlist_add()
114 obj = rb_entry(p, struct fscache_object, objlist_link); in fscache_objlist_lookup()
/linux-4.4.14/arch/sh/kernel/
dwarf.c 326 cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node); in dwarf_lookup_cie()
362 fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node); in dwarf_lookup_fde()
842 cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node); in dwarf_parse_cie()
928 fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node); in dwarf_parse_fde()
/linux-4.4.14/fs/dlm/
debug_fs.c 458 r = rb_entry(node, struct dlm_rsb, res_hashnode); in table_seq_start()
490 r = rb_entry(node, struct dlm_rsb, res_hashnode); in table_seq_start()
524 r = rb_entry(next, struct dlm_rsb, res_hashnode); in table_seq_next()
555 r = rb_entry(next, struct dlm_rsb, res_hashnode); in table_seq_next()
Drecover.c907 r = rb_entry(n, struct dlm_rsb, res_hashnode); in dlm_create_root_list()
944 r = rb_entry(n, struct dlm_rsb, res_hashnode); in dlm_clear_toss()
Dlockspace.c818 rsb = rb_entry(n, struct dlm_rsb, res_hashnode); in release_lockspace()
824 rsb = rb_entry(n, struct dlm_rsb, res_hashnode); in release_lockspace()
Dlock.c455 r = rb_entry(node, struct dlm_rsb, res_hashnode); in dlm_search_rsb_tree()
479 struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb, in rsb_insert()
1095 r = rb_entry(n, struct dlm_rsb, res_hashnode); in dlm_dump_rsb_hash()
1672 r = rb_entry(n, struct dlm_rsb, res_hashnode); in shrink_bucket()
5477 r = rb_entry(n, struct dlm_rsb, res_hashnode); in find_grant_rsb()
/linux-4.4.14/fs/
Deventpoll.c757 epi = rb_entry(rbp, struct epitem, rbn); in ep_free()
773 epi = rb_entry(rbp, struct epitem, rbn); in ep_free()
880 struct epitem *epi = rb_entry(rbp, struct epitem, rbn); in ep_show_fdinfo()
979 epi = rb_entry(rbp, struct epitem, rbn); in ep_find()
1115 epic = rb_entry(parent, struct epitem, rbn); in ep_rbtree_insert()
1692 epi = rb_entry(rbp, struct epitem, rbn); in ep_loop_check_proc()
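
ep_free() empties the tree by repeatedly taking rb_first() and tearing down the entry it names; rb_entry() recovers the epitem on each pass. Minus the surrounding locking, the loop looks roughly like:

    struct rb_node *rbp;
    struct epitem *epi;

    while ((rbp = rb_first(&ep->rbr)) != NULL) {
            epi = rb_entry(rbp, struct epitem, rbn);
            ep_remove(ep, epi);     /* unlinks rbp from ep->rbr */
            cond_resched();
    }

Re-reading rb_first() on every iteration is what makes deleting while iterating safe here.
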
/linux-4.4.14/net/sched/
Dsch_hfsc.c202 cl1 = rb_entry(parent, struct hfsc_class, el_node); in eltree_insert()
233 p = rb_entry(n, struct hfsc_class, el_node); in eltree_get_mindl()
251 return rb_entry(n, struct hfsc_class, el_node); in eltree_get_minel()
267 cl1 = rb_entry(parent, struct hfsc_class, vt_node); in vttree_insert()
297 p = rb_entry(n, struct hfsc_class, vt_node); in vttree_firstfit()
336 cl1 = rb_entry(parent, struct hfsc_class, cf_node); in cftree_insert()
668 p = rb_entry(n, struct hfsc_class, cf_node); in update_cfmin()
691 max_cl = rb_entry(n, struct hfsc_class, vt_node); in init_vf()
Dsch_htb.c277 c = rb_entry(parent, struct htb_class, node[prio]); in htb_add_to_id_tree()
311 c = rb_entry(parent, struct htb_class, pq_node); in htb_add_to_wait_tree()
708 cl = rb_entry(p, struct htb_class, pq_node); in htb_do_events()
737 rb_entry(n, struct htb_class, node[prio]); in htb_id_find_next_upper()
797 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]); in htb_lookup_leaf()
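
htb_id_find_next_upper() is rb_entry() in a lower-bound search: while descending, remember the most recent node whose classid was above the target, and fall back to it when no exact match exists. Condensed to that logic, with the 4.4 field names assumed:

    static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
                                                  u32 id)
    {
            struct rb_node *r = NULL;

            while (n) {
                    struct htb_class *cl =
                            rb_entry(n, struct htb_class, node[prio]);

                    if (id > cl->common.classid) {
                            n = n->rb_right;
                    } else if (id < cl->common.classid) {
                            r = n;          /* best upper bound so far */
                            n = n->rb_left;
                    } else {
                            return n;       /* exact hit */
                    }
            }
            return r;
    }
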
/linux-4.4.14/drivers/vfio/
Dvfio_iommu_type1.c96 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); in vfio_find_dma()
116 dma = rb_entry(parent, struct vfio_dma, node); in vfio_link_dma()
673 dma = rb_entry(n, struct vfio_dma, node); in vfio_iommu_replay()
859 vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node)); in vfio_iommu_unmap_unpin_all()
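
vfio_find_dma() adapts the keyed descent to ranges: a mapping matches if the queried [start, start + size) window overlaps it, so the two comparisons test for strictly-before and strictly-after. Roughly as in the 4.4 source:

    static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
                                          dma_addr_t start, size_t size)
    {
            struct rb_node *node = iommu->dma_list.rb_node;

            while (node) {
                    struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

                    if (start + size <= dma->iova)
                            node = node->rb_left;   /* window ends before dma */
                    else if (start >= dma->iova + dma->size)
                            node = node->rb_right;  /* window starts after dma */
                    else
                            return dma;             /* overlap */
            }
            return NULL;
    }
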
/linux-4.4.14/fs/gfs2/
Drgrp.c511 cur = rb_entry(n, struct gfs2_rgrpd, rd_node); in gfs2_blk2rgrpd()
548 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); in gfs2_rgrpd_get_first()
575 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); in gfs2_rgrpd_get_next()
713 rs = rb_entry(n, struct gfs2_blkreserv, rs_node); in return_all_reservations()
726 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); in gfs2_clear_rgrpd()
870 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd, in rgd_insert()
1469 rb_entry(*newn, struct gfs2_blkreserv, rs_node); in rs_insert()
1566 rs = rb_entry(n, struct gfs2_blkreserv, rs_node); in gfs2_next_unreserved_block()
1582 rs = rb_entry(n, struct gfs2_blkreserv, rs_node); in gfs2_next_unreserved_block()
2240 trs = rb_entry(n, struct gfs2_blkreserv, rs_node); in gfs2_rgrp_dump()
/linux-4.4.14/net/802/
Dgarp.c158 attr = rb_entry(parent, struct garp_attr, node); in garp_attr_lookup()
179 attr = rb_entry(parent, struct garp_attr, node); in garp_attr_create()
391 attr = rb_entry(node, struct garp_attr, node); in garp_gid_event()
Dmrp.c247 attr = rb_entry(parent, struct mrp_attr, node); in mrp_attr_lookup()
268 attr = rb_entry(parent, struct mrp_attr, node); in mrp_attr_create()
577 attr = rb_entry(node, struct mrp_attr, node); in mrp_mad_event()
/linux-4.4.14/fs/cachefiles/
Dnamei.c112 object = rb_entry(p, struct cachefiles_object, active_node); in cachefiles_mark_object_buried()
170 xobject = rb_entry(_parent, in cachefiles_mark_object_active()
867 object = rb_entry(_n, struct cachefiles_object, active_node); in cachefiles_check_active()
/linux-4.4.14/kernel/events/
Duprobes.c398 uprobe = rb_entry(n, struct uprobe, rb_node); in __find_uprobe()
435 u = rb_entry(parent, struct uprobe, rb_node); in __insert_uprobe()
993 struct uprobe *u = rb_entry(n, struct uprobe, rb_node); in find_node_in_range()
1032 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
1039 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
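
uprobes keys its global tree by (inode, offset). find_node_in_range() descends to any node inside [min, max], after which build_probe_list() fans out with rb_prev()/rb_next() to collect the rest of the range. Approximately the 4.4 locate step (inode pointers are compared directly, as in the source):

    static struct rb_node *find_node_in_range(struct inode *inode,
                                              loff_t min, loff_t max)
    {
            struct rb_node *n = uprobes_tree.rb_node;

            while (n) {
                    struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

                    if (inode < u->inode) {
                            n = n->rb_left;
                    } else if (inode > u->inode) {
                            n = n->rb_right;
                    } else if (max < u->offset) {
                            n = n->rb_left;
                    } else if (min > u->offset) {
                            n = n->rb_right;
                    } else {
                            break;  /* n lies somewhere inside [min, max] */
                    }
            }
            return n;
    }
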
Dcore.c4536 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
4550 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
4574 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
4672 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
7880 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
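
The core.c hits are not the rbtree macro at all: struct perf_event carries a struct list_head that happens to be named rb_entry ("rb" meaning ring buffer here), linking the event into its buffer's event_list. The wakeup path is ordinary RCU list iteration; condensed from ring_buffer_wakeup():

    struct ring_buffer *rb;

    rcu_read_lock();
    rb = rcu_dereference(event->rb);
    if (rb) {
            /* rb_entry names the list_head member, not the macro */
            list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
                    wake_up_all(&event->waitq);
    }
    rcu_read_unlock();
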
/linux-4.4.14/drivers/infiniband/core/
Dmulticast.c138 group = rb_entry(node, struct mcast_group, node); in mcast_find()
162 cur_group = rb_entry(parent, struct mcast_group, node); in mcast_insert()
764 group = rb_entry(node, struct mcast_group, node); in mcast_groups_event()
Dcm.c480 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, in cm_insert_listen()
510 cm_id_priv = rb_entry(node, struct cm_id_private, service_node); in cm_find_listen()
541 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, in cm_insert_remote_id()
567 timewait_info = rb_entry(node, struct cm_timewait_info, in cm_find_remote_id()
594 cur_timewait_info = rb_entry(parent, struct cm_timewait_info, in cm_insert_remote_qpn()
624 cur_cm_id_priv = rb_entry(parent, struct cm_id_private, in cm_insert_remote_sidr()
Duverbs_cmd.c668 scan = rb_entry(parent, struct xrcd_table_entry, node); in xrcd_table_insert()
693 entry = rb_entry(p, struct xrcd_table_entry, node); in xrcd_table_search()
/linux-4.4.14/drivers/infiniband/ulp/ipoib/
Dipoib_multicast.c165 mcast = rb_entry(n, struct ipoib_mcast, rb_node); in __ipoib_mcast_find()
190 tmcast = rb_entry(pn, struct ipoib_mcast, rb_node); in __ipoib_mcast_add()
1000 mcast = rb_entry(n, struct ipoib_mcast, rb_node); in ipoib_mcast_iter_next()
Dipoib_main.c492 path = rb_entry(n, struct ipoib_path, rb_node); in __path_find()
518 tpath = rb_entry(pn, struct ipoib_path, rb_node); in __path_add()
589 path = rb_entry(n, struct ipoib_path, rb_node); in ipoib_path_iter_next()
/linux-4.4.14/fs/nfs/
Dnfs4state.c180 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); in nfs4_get_renew_cred_server_locked()
394 sp = rb_entry(parent, struct nfs4_state_owner, so_server_node); in nfs4_find_state_owner_locked()
421 sp = rb_entry(parent, struct nfs4_state_owner, so_server_node); in nfs4_insert_state_owner_locked()
1562 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); in nfs4_reset_seqids()
1612 sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); in nfs4_clear_reclaim_server()
1723 sp = rb_entry(pos, in nfs4_do_reclaim()
Dfscache.c103 xkey = rb_entry(parent, struct nfs_fscache_key, node); in nfs_fscache_get_super_cookie()
Ddir.c2182 entry = rb_entry(n, struct nfs_access_entry, rb_node); in __nfs_access_zap_cache()
2214 entry = rb_entry(n, struct nfs_access_entry, rb_node); in nfs_access_search_rbtree()
2304 entry = rb_entry(parent, struct nfs_access_entry, rb_node); in nfs_access_add_rbtree()
/linux-4.4.14/fs/nilfs2/
Dthe_nilfs.c727 root = rb_entry(n, struct nilfs_root, rb_node); in nilfs_lookup_root()
766 root = rb_entry(parent, struct nilfs_root, rb_node); in nilfs_find_or_create_root()
/linux-4.4.14/block/
Delevator.c305 __rq = rb_entry(parent, struct request, rb_node); in elv_rb_add()
332 rq = rb_entry(n, struct request, rb_node); in elv_rb_find()
Dcfq-iosched.c68 #define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
1175 return rb_entry(root->left, struct cfq_queue, rb_node); in cfq_rb_first()
2214 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); in cfq_service_tree_add()
2252 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); in cfq_service_tree_add()
2291 cfqq = rb_entry(parent, struct cfq_queue, p_node); in cfq_prio_tree_lookup()
2786 __cfqq = rb_entry(parent, struct cfq_queue, p_node); in cfqq_close()
2797 __cfqq = rb_entry(node, struct cfq_queue, p_node); in cfqq_close()
Dblk-throttle.c84 #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node)
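
The elevator keeps requests sorted by start sector, and elv_rb_find() is the plain keyed descent with rb_entry() supplying the request for each comparison; close to the 4.4 source:

    struct request *elv_rb_find(struct rb_root *root, sector_t sector)
    {
            struct rb_node *n = root->rb_node;
            struct request *rq;

            while (n) {
                    rq = rb_entry(n, struct request, rb_node);

                    if (sector < blk_rq_pos(rq))
                            n = n->rb_left;
                    else if (sector > blk_rq_pos(rq))
                            n = n->rb_right;
                    else
                            return rq;
            }
            return NULL;
    }
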
/linux-4.4.14/drivers/block/
Dpktcdvd.c628 return rb_entry(n, struct pkt_rb_node, rb_node); in pkt_rbtree_next()
654 tmp = rb_entry(n, struct pkt_rb_node, rb_node); in pkt_rbtree_find()
685 tmp = rb_entry(parent, struct pkt_rb_node, rb_node); in pkt_rbtree_insert()
1219 first_node = rb_entry(n, struct pkt_rb_node, rb_node); in pkt_handle_queue()
1237 node = rb_entry(n, struct pkt_rb_node, rb_node); in pkt_handle_queue()
/linux-4.4.14/drivers/macintosh/
Dvia-pmu.c2032 struct rb_entry { struct
2064 struct rb_entry *rp = &pp->rb_buf[pp->rb_put]; in pmu_pass_intr()
2121 struct rb_entry *rp = &pp->rb_buf[i]; in pmu_read()
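
One hit that is a false positive for the rbtree API: via-pmu.c declares its own struct rb_entry, a slot in the PMU interrupt ring buffer, unrelated to <linux/rbtree.h>. The shape is roughly the following (a sketch; the exact field sizes in 4.4 may differ):

    struct rb_entry {
            unsigned short len;
            unsigned char data[16];
    };

pmu_pass_intr() writes incoming PMU packets into pp->rb_buf[pp->rb_put] and pmu_read() drains them: a plain producer/consumer ring, no tree involved.
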
/linux-4.4.14/drivers/xen/
Devtchn.c617 evtchn = rb_entry(node, struct user_evtchn, node); in evtchn_release()
/linux-4.4.14/fs/nfs/blocklayout/
Dextent_tree.c14 return rb_entry(node, struct pnfs_block_extent, be_node); in ext_node()
/linux-4.4.14/ipc/
Dmqueue.c128 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); in msg_insert()
182 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); in msg_get()
/linux-4.4.14/fs/ocfs2/cluster/
Dnodemanager.c90 node = rb_entry(parent, struct o2nm_node, nd_ip_node); in o2nm_node_ip_tree_lookup()
Dtcp.c776 nmh = rb_entry(parent, struct o2net_msg_handler, nh_node); in o2net_handler_tree_lookup()
/linux-4.4.14/arch/blackfin/kernel/
Dtrace.c131 vma = rb_entry(n, struct vm_area_struct, vm_rb); in decode_address()
/linux-4.4.14/fs/cifs/
Dconnect.c3911 tlink = rb_entry(node, struct tcon_link, tl_rbnode); in cifs_umount()
4062 tlink = rb_entry(node, struct tcon_link, tl_rbnode); in tlink_rb_search()
4082 tlink = rb_entry(*new, struct tcon_link, tl_rbnode); in tlink_rb_insert()
4210 tlink = rb_entry(tmp, struct tcon_link, tl_rbnode); in cifs_prune_tlinks()
/linux-4.4.14/net/wireless/
Dscan.c639 tbss = rb_entry(parent, struct cfg80211_internal_bss, rbn); in rb_insert_bss()
668 bss = rb_entry(n, struct cfg80211_internal_bss, rbn); in rb_find_bss()
/linux-4.4.14/fs/kernfs/
Ddir.c25 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
/linux-4.4.14/fs/fuse/
Dfile.c2660 ff = rb_entry(last, struct fuse_file, polled_node); in fuse_find_polled_node()
2756 ff = rb_entry(*link, struct fuse_file, polled_node); in fuse_notify_poll_wakeup()
Ddev.c2101 ff = rb_entry(p, struct fuse_file, polled_node); in end_polls()
/linux-4.4.14/drivers/md/
Ddm-crypt.c1188 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
Ddm-thin.c1927 #define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
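
dm-crypt and dm-thin follow the same convention as cfq-iosched.c, blk-throttle.c and kernfs above: wrap rb_entry() in a one-line macro that names the conversion once, so call sites read as typed accessors rather than repeating the three-argument cast. A hypothetical wrapper in the same spirit (foo_io and foo_from_node are illustrative names, not kernel ones):

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct foo_io {
            struct rb_node rb_node;     /* embedded tree linkage */
            sector_t sector;            /* tree key */
    };

    /* Same shape as crypt_io_from_node() and rb_to_kn(). */
    #define foo_from_node(node) rb_entry((node), struct foo_io, rb_node)
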
/linux-4.4.14/fs/nfsd/
Dnfs4state.c2059 clp = rb_entry(*new, struct nfs4_client, cl_namenode); in add_clp_to_name_tree()
2080 clp = rb_entry(node, struct nfs4_client, cl_namenode); in find_clp_in_name_tree()