/linux-4.1.27/tools/perf/tests/

hists_output.c
    169  node = rb_next(node);  in test1()
    175  node = rb_next(node);  in test1()
    181  node = rb_next(node);  in test1()
    187  node = rb_next(node);  in test1()
    193  node = rb_next(node);  in test1()
    199  node = rb_next(node);  in test1()
    205  node = rb_next(node);  in test1()
    211  node = rb_next(node);  in test1()
    268  node = rb_next(node);  in test2()
    323  node = rb_next(node);  in test3()
    [all …]

vmlinux-kallsyms.c
    110  for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {  in test__vmlinux_matches_kallsyms()
    156  rb_next(&pair->rb_node);  in test__vmlinux_matches_kallsyms()
    187  for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {  in test__vmlinux_matches_kallsyms()
    207  for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {  in test__vmlinux_matches_kallsyms()
    232  nd; nd = rb_next(nd)) {  in test__vmlinux_matches_kallsyms()

hists_common.c
    179  node = rb_next(node);  in print_hists_in()
    207  node = rb_next(node);  in print_hists_out()

hists_link.c
    178  node = rb_next(node);  in __validate_match()
    235  node = rb_next(node);  in __validate_link()

hists_cumulate.c
    202  node = rb_next(node), i++) {  in do_test()

/linux-4.1.27/net/ceph/ |
debugfs.c
    68   for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) {  in osdmap_show()
    87   for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) {  in osdmap_show()
    98   for (n = rb_first(&map->primary_temp); n; n = rb_next(n)) {  in osdmap_show()
    125  for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {  in monc_show()
    148  for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {  in osdc_show()

mon_client.c
    715  for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {  in __resend_generic_request()

osd_client.c
    1941  n = rb_next(p);  in reset_changed_osds()
    1971  p = rb_next(p);  in kick_requests()

osdmap.c
    522  for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {  in ceph_pg_poolid_by_name()

/linux-4.1.27/arch/x86/mm/ |
pat_rbtree.c
    113  node = rb_next(&match->rb);  in memtype_rb_exact_match()
    142  node = rb_next(&match->rb);  in memtype_rb_check_conflict()
    154  node = rb_next(&match->rb);  in memtype_rb_check_conflict()
    237  node = rb_next(node);  in rbt_memtype_copy_nth_element()

/linux-4.1.27/security/keys/ |
proc.c
    89   n = rb_next(n);  in key_serial_next()
    94   n = rb_next(n);  in key_serial_next()
    131  n = rb_next(&minkey->serial_node);  in find_ge_key()
    278  n = rb_next(n);  in __key_user_next()
    285  return __key_user_next(user_ns, rb_next(n));  in key_user_next()

gc.c
    221  cursor = rb_next(cursor);  in key_garbage_collector()

key.c
    185  parent = rb_next(parent);  in key_alloc_serial()

/linux-4.1.27/lib/ |
timerqueue.c
    78   struct rb_node *rbn = rb_next(&node->node);  in timerqueue_del()
    103  next = rb_next(&node->node);  in timerqueue_iterate_next()

rbtree_test.c
    143  for (rb = rb_first(&root); rb; rb = rb_next(rb)) {  in check()
    169  for (rb = rb_first(&root); rb; rb = rb_next(rb)) {  in check_augmented()

rbtree.c
    444  struct rb_node *rb_next(const struct rb_node *node)  definition of rb_next()
    474  EXPORT_SYMBOL(rb_next);

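Of everything in this index, lib/rbtree.c is the one definition site; every other hit is a caller. As a reading aid, here is a minimal sketch of the in-order successor logic that rb_next() implements (simplified: the in-tree version also bails out early on an empty node, and rb_next_sketch is an illustrative name, not the kernel symbol):

```c
#include <linux/rbtree.h>

/*
 * Sketch of rb_next(): return the in-order successor of @node.
 * If @node has a right subtree, the successor is that subtree's
 * leftmost node; otherwise walk up until we arrive from a left child.
 */
struct rb_node *rb_next_sketch(const struct rb_node *node)
{
	struct rb_node *parent;

	if (node->rb_right) {
		node = node->rb_right;
		while (node->rb_left)
			node = node->rb_left;
		return (struct rb_node *)node;
	}

	while ((parent = rb_parent(node)) && node == parent->rb_right)
		node = parent;

	return parent;	/* NULL once we step past the rightmost node */
}
```

A single step is O(log n) in the worst case, but a full rb_first()/rb_next() traversal visits each edge at most twice, so iterating the whole tree is O(n) overall.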
/linux-4.1.27/fs/proc/ |
task_nommu.c
    26   for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {  in task_mem()
    87   for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {  in task_vsize()
    105  for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {  in task_statm()
    244  for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))  in m_start()
    272  return p ? rb_next(p) : NULL;  in m_next()

nommu.c
    92   for (p = rb_first(&nommu_region_tree); p; p = rb_next(p))  in nommu_region_list_start()
    106  return rb_next((struct rb_node *) v);  in nommu_region_list_next()

generic.c
    49   return rb_entry_safe(rb_next(&dir->subdir_node), struct proc_dir_entry,  in pde_subdir_next()

proc_sysctl.c
    351  for (;node; node = rb_next(node)) {  in first_usable_entry()
    386  ctl_node = first_usable_entry(rb_next(&ctl_node->node));  in next_entry()

/linux-4.1.27/tools/perf/ |
builtin-annotate.c
    134  nd = rb_next(nd);  in hists__find_annotations()
    156  nd = rb_next(nd);  in hists__find_annotations()
    165  next = rb_next(nd);  in hists__find_annotations()
    178  nd = rb_next(nd);  in hists__find_annotations()

builtin-kmem.c
    561  next = rb_next(next);  in __print_slab_result()
    607  next = rb_next(next);  in __print_page_result()

builtin-diff.c
    438  next = rb_next(&he->rb_node_in);  in hists__baseline_only()
    463  next = rb_next(&he->rb_node_in);  in hists__precompute()

builtin-lock.c
    771  node = rb_next(node);  in dump_threads()

builtin-top.c
    359  next = rb_next(&n->rb_node);  in perf_top__prompt_symbol()

builtin-sched.c
    1567  next = rb_next(next);  in perf_sched__lat()

/linux-4.1.27/tools/perf/util/ |
symbol.c
    156  nd = rb_next(&curr->rb_node);  in symbols__fixup_duplicate()
    170  nd = rb_next(&curr->rb_node);  in symbols__fixup_duplicate()
    187  for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {  in symbols__fixup_end()
    210  for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {  in __map_groups__fixup_end()
    292  next = rb_next(&pos->rb_node);  in symbols__delete()
    352  struct rb_node *n = rb_next(&sym->rb_node);  in symbols__next()
    390  for (nd = rb_first(source); nd; nd = rb_next(nd)) {  in symbols__sort_by_name()
    457  struct rb_node *n = rb_next(&s->rb_node);  in symbol__next_by_name()
    485  for (nd = rb_first(&dso->symbol_names[type]); nd; nd = rb_next(nd)) {  in dso__fprintf_symbols_by_name()
    647  next = rb_next(&pos->rb_node);  in dso__split_kallsyms_for_kcore()
    [all …]

rblist.c
    111  next = rb_next(pos);  in rblist__delete()
    122  for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {  in rblist__entry()

map.c
    434  next = rb_next(&pos->rb_node);  in maps__delete()
    506  next = rb_next(&pos->rb_node);  in map_groups__flush()
    543  for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {  in map_groups__find_symbol_by_name()
    580  for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {  in __map_groups__fprintf_maps()
    643  next = rb_next(&pos->rb_node);  in map_groups__fixup_overlappings()
    709  for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {  in map_groups__clone()
    775  struct rb_node *next = rb_next(&map->rb_node);  in maps__next()

callchain.c
    304  n = rb_next(n);  in __sort_chain_flat()
    335  n = rb_next(n);  in __sort_chain_graph_abs()
    365  n = rb_next(n);  in __sort_chain_graph_rel()
    429  n = rb_next(n);  in create_child()
    714  n = rb_next(n);  in merge_chain_branch()
    867  n = rb_next(n);  in free_callchain_node()

hist.c
    174   next = rb_next(&n->rb_node);  in hists__output_recalc_col_len()
    268   next = rb_next(&n->rb_node);  in hists__decay_entries()
    284   next = rb_next(&n->rb_node);  in hists__delete_entries()
    1038  next = rb_next(&n->rb_node_in);  in hists__collapse_resort()
    1148  next = rb_next(&n->rb_node_in);  in hists__output_resort()
    1201  for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {  in hists__filter_by_dso()
    1235  for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {  in hists__filter_by_thread()
    1267  for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {  in hists__filter_by_symbol()
    1378  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in hists__match()
    1403  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in hists__link()

build-id.c
    226  for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {  in perf_session__write_buildid_table()
    265  for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {  in dsos__hit_all()
    511  for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {  in perf_session__cache_build_ids()
    532  for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {  in perf_session__read_build_ids()

machine.c
    99    nd = rb_next(nd);  in machine__delete_threads()
    173   for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {  in machines__set_symbol_filter()
    186   for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {  in machines__set_comm_exec()
    258   for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {  in machines__process_guests()
    285   for (node = rb_first(&machines->guests); node; node = rb_next(node)) {  in machines__set_id_hdr_size()
    525   for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {  in machines__fprintf_dsos()
    547   for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {  in machines__fprintf_dsos_buildid()
    578   for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {  in machine__fprintf()
    779   next = rb_next(&pos->rb_node);  in machines__destroy_kernel_maps()
    1757  for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {  in machine__for_each_thread()

strlist.h
    55   rn = rb_next(&sn->rb_node);  in strlist__next()

intlist.h
    55   rn = rb_next(&in->rb_node);  in intlist__next()

symbol.h
    73   nd = rb_next(nd))

dso.c
    505   next = rb_next(&cache->rb_node);  in dso_cache__free()
    1122  for (nd = rb_first(&dso->symbols[type]); nd; nd = rb_next(nd)) {  in dso__fprintf()

annotate.c
    1159  next = rb_next(node);  in resort_source_line()
    1282  node = rb_next(node);  in print_summary()

event.c
    349  nd; nd = rb_next(nd)) {  in perf_event__synthesize_modules()

probe-event.c
    174  for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {  in kernel_get_module_map()

/linux-4.1.27/tools/perf/ui/browsers/ |
hists.c
    59   nd = rb_next(nd)) {  in hist_browser__get_folding()
    164  for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {  in callchain_node__count_rows_rb_tree()
    206  for (nd = rb_first(chain); nd; nd = rb_next(nd)) {  in callchain__count_rows()
    230  for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {  in callchain_node__init_have_children_rb_tree()
    268  bool has_sibling = nd && rb_next(nd);  in callchain__init_have_children()
    270  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in callchain__init_have_children()
    314  for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {  in callchain_node__set_folding_rb_tree()
    355  for (nd = rb_first(chain); nd; nd = rb_next(nd)) {  in callchain__set_folding()
    383  nd = rb_next(nd)) {  in __hist_browser__set_folding()
    574  need_percent = node && rb_next(node);  in hist_browser__show_callchain()
    [all …]

map.c
    116  for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {  in map__browse()

annotate.c
    720  nd = rb_next(nd);  in annotate_browser__run()

/linux-4.1.27/drivers/base/regmap/ |
regcache-rbtree.c
    154  node = rb_next(node)) {  in rbtree_show()
    243  next = rb_next(&rbtree_node->node);  in regcache_rbtree_exit()
    417  node = rb_next(node)) {  in regcache_rbtree_write()
    467  for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {  in regcache_rbtree_sync()
    507  for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {  in regcache_rbtree_drop()

regmap-debugfs.c
    549  next = rb_next(&range_node->node);  in regmap_debugfs_init()

regmap.c
    422  next = rb_next(&range_node->node);  in regmap_range_exit()

/linux-4.1.27/tools/perf/ui/stdio/ |
hist.c
    111  next = rb_next(node);  in __callchain__fprintf_graph()
    177  if (node && !rb_next(node)) {  in callchain__fprintf_graph()
    257  rb_node = rb_next(rb_node);  in callchain__fprintf_flat()
    461  for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {  in hists__fprintf()

/linux-4.1.27/arch/powerpc/kernel/ |
eeh_cache.c
    121  n = rb_next(n);  in eeh_addr_cache_print()
    249  n = rb_next(n);  in __eeh_addr_cache_rmv_dev()

/linux-4.1.27/fs/ext4/ |
block_validity.c
    109  node = rb_next(new_node);  in add_system_zone()
    134  node = rb_next(node);  in debug_print_tree()

extents_status.c
    189   node = rb_next(node);  in ext4_es_print_tree()
    227   node = rb_next(&es->rb_node);  in __es_tree_search()
    275   while ((node = rb_next(&es1->rb_node)) != NULL) {  in ext4_es_find_delayed_extent_range()
    443   node = rb_next(&es->rb_node);  in ext4_es_try_to_merge_right()
    905   node = rb_next(&es->rb_node);  in __es_remove_extent()
    913   node = rb_next(&es->rb_node);  in __es_remove_extent()
    1262  node = rb_next(&es->rb_node);  in es_do_reclaim_extents()

dir.c
    581  info->curr_node = rb_next(info->curr_node);  in ext4_dx_readdir()

mballoc.c
    3442  n = rb_next(n);  in ext4_mb_generate_from_freelist()
    4613  node = rb_next(new_node);  in ext4_mb_free_metadata()

/linux-4.1.27/fs/ocfs2/ |
reservations.c
    105  node = rb_next(node);  in ocfs2_dump_resv()
    183  node = rb_next(node);  in ocfs2_check_resmap()
    392  node = rb_next(node);  in ocfs2_find_resv_lhs()
    573  next = rb_next(prev);  in __ocfs2_resv_find_window()

/linux-4.1.27/fs/btrfs/ |
extent_map.c
    114  parent = rb_next(parent);  in tree_insert()
    166  prev = rb_next(prev);  in __tree_search()
    250  rb = rb_next(&em->rb_node);  in try_merge_map()

delayed-ref.c
    225  n = rb_next(&entry->href_node);  in find_ref_head()
    289  node = rb_next(&ref->rb_node);  in merge_ref()
    294  node = rb_next(node);  in merge_ref()
    369  node = rb_next(&ref->rb_node);  in btrfs_merge_delayed_refs()
    424  node = rb_next(&head->href_node);  in btrfs_select_ref_head()

free-space-cache.c
    648   for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {  in merge_space_tree()
    947   node = rb_next(node);  in write_cache_extent_entries()
    1530  n = rb_next(n);  in tree_search_offset()
    1603  n = rb_next(&entry->offset_index);  in tree_search_offset()
    1788  for (node = &entry->offset_index; node; node = rb_next(node)) {  in find_free_space()
    1894  struct rb_node *next = rb_next(&bitmap_info->offset_index);  in remove_from_bitmap()
    2432  for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {  in btrfs_dump_free_space()
    2496  node = rb_next(&entry->offset_index);  in __btrfs_return_cluster_to_free_space()
    2711  node = rb_next(&entry->offset_index);  in btrfs_alloc_from_cluster()
    2725  node = rb_next(&entry->offset_index);  in btrfs_alloc_from_cluster()
    [all …]

extent_io.c
    307   prev = rb_next(prev);  in __etree_search()
    384   other_node = rb_next(&state->rb_node);  in merge_state()
    498   struct rb_node *next = rb_next(&state->rb_node);  in next_state()
    782   node = rb_next(node);  in wait_extent_bit()
    1462  node = rb_next(node);  in find_first_extent_bit_state()
    1489  n = rb_next(&state->rb_node);  in find_first_extent_bit()
    1495  n = rb_next(n);  in find_first_extent_bit()
    1566  node = rb_next(node);  in find_delalloc_range()
    1858  node = rb_next(node);  in count_range_bits()
    1970  node = rb_next(node);  in test_range_bit()

ordered-data.c
    102  test = rb_next(prev);  in __tree_search()
    836  node = rb_next(node);  in btrfs_lookup_ordered_range()

qgroup.c
    1670  n = rb_next(&oper->n);  in qgroup_account_deleted_refs()
    1709  n = rb_next(&tmp_oper->n);  in qgroup_account_deleted_refs()
    2888  for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {  in qgroup_rescan_zero_tracking()

relocation.c
    1544  prev = rb_next(prev);
    1559  node = rb_next(node);
    2970  rb_node = rb_next(rb_node);
    2981  rb_node = rb_next(rb_node);
    3002  rb_node = rb_next(rb_node);

delayed-inode.c
    381  else if ((node = rb_next(prev_node)) != NULL) {  in __btrfs_lookup_delayed_item()
    535  p = rb_next(&item->rb_node);  in __btrfs_next_delayed_item()

backref.c
    591  n = rb_next(n);  in __add_delayed_refs()

extent-tree.c
    2345  node = rb_next(node);  in select_delayed_ref()
    2860  node = rb_next(node);  in btrfs_run_delayed_refs()
    2940  node = rb_next(node);  in check_delayed_ref()
    3205  node = rb_next(&cache->cache_node);  in next_block_group()

ctree.c
    426   next = rb_next(node);  in btrfs_put_tree_mod_seq()
    1333  next = rb_next(&tm->node);  in __tree_mod_log_rewind()

file.c
    251  parent = rb_next(parent);  in btrfs_pick_defrag_inode()

inode.c
    5430  prev = rb_next(prev);  in btrfs_invalidate_inodes()
    5455  node = rb_next(node);  in btrfs_invalidate_inodes()
    6574  next = rb_next(&em->rb_node);  in next_extent_map()

/linux-4.1.27/drivers/iommu/ |
iova.c
    112  struct rb_node *node = rb_next(&free->node);  in __cached_rbnode_delete_update()
    445  for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {  in reserve_iova()
    482  for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {  in copy_reserved_iova()

arm-smmu.c
    1814  for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {  in arm_smmu_device_dt_probe()
    1843  for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {  in arm_smmu_device_remove()

/linux-4.1.27/kernel/trace/ |
trace_stat.c
    191  node = rb_next(node);  in stat_seq_start()
    206  return rb_next(node);  in stat_seq_next()

/linux-4.1.27/include/linux/ |
rbtree.h
    66   extern struct rb_node *rb_next(const struct rb_node *);

/linux-4.1.27/tools/perf/ui/gtk/ |
hists.c
    98   for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in perf_gtk__add_callchain()
    223  for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {  in perf_gtk__show_hists()

/linux-4.1.27/drivers/char/ |
mmtimer.c
    342  n->next = rb_next(&x->list);  in mmtimer_set_next_timer()
    575  mn->next = rb_next(&x->list);  in mmtimer_tasklet()
    642  timers[nodeid].next = rb_next(n);  in sgi_timer_del()

/linux-4.1.27/drivers/block/drbd/ |
drbd_interval.c
    170  node = rb_next(&i->rb);  in drbd_next_overlap()

/linux-4.1.27/fs/jffs2/ |
nodelist.h
    347  #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
    354  #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)

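The two jffs2 macros above wrap the common step of mapping the successor rb_node back to its containing structure with rb_entry(). A hedged generic sketch of that idiom (struct item and item_next() are illustrative names, not jffs2 code; unlike the raw macros, this version also NULL-checks the end of the tree):

```c
#include <linux/rbtree.h>

struct item {
	struct rb_node rb;
	int key;
};

/* Next item in key order, or NULL at the end of the tree. */
static struct item *item_next(struct item *it)
{
	struct rb_node *nd = rb_next(&it->rb);

	return nd ? rb_entry(nd, struct item, rb) : NULL;
}
```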
/linux-4.1.27/kernel/power/ |
wakelock.c
    45   for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {  in pm_show_wakelocks()

/linux-4.1.27/drivers/staging/android/ion/ |
ion.c
    689   for (n = rb_first(&client->handles); n; n = rb_next(n)) {  in ion_debug_client_show()
    727   for (node = rb_first(root); node; node = rb_next(node)) {  in ion_get_client_serial()
    1384  for (n = rb_first(&client->handles); n; n = rb_next(n)) {  in ion_debug_heap_total()
    1406  for (n = rb_first(&dev->clients); n; n = rb_next(n)) {  in ion_debug_heap_show()
    1427  for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {  in ion_debug_heap_show()

/linux-4.1.27/tools/perf/ui/ |
browser.c
    131  nd = rb_next(nd);  in ui_browser__rb_tree_seek()
    155  nd = rb_next(nd);  in ui_browser__rb_tree_refresh()

/linux-4.1.27/fs/kernfs/ |
dir.c
    985   rbn = rb_next(&pos->rb);  in kernfs_next_descendant_post()
    1384  struct rb_node *node = rb_next(&pos->rb);  in kernfs_dir_pos()
    1399  struct rb_node *node = rb_next(&pos->rb);  in kernfs_dir_next_pos()

/linux-4.1.27/fs/ext2/ |
balloc.c
    235   n = rb_next(n);  in __rsv_window_dump()
    812   next = rb_next(&rsv->rsv_node);  in find_next_reservable_window()
    1050  next = rb_next(&my_rsv->rsv_node);  in try_to_extend_reservation()

/linux-4.1.27/net/netfilter/ |
nft_rbtree.c
    184  for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {  in nft_rbtree_walk()

/linux-4.1.27/fs/ceph/ |
caps.c
    347   for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {  in __ceph_get_cap_mds()
    675   for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {  in __ceph_caps_issued()
    706   for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {  in __ceph_caps_issued_other()
    756   for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {  in __ceph_caps_issued_mask()
    783   q = rb_next(q)) {  in __ceph_caps_issued_mask()
    807   for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {  in __ceph_caps_revoking_other()
    867   for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {  in __ceph_caps_mds_wanted()
    1106  p = rb_next(p);  in ceph_queue_caps_release()
    1631  for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {  in ceph_check_caps()

debugfs.c
    60   for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {  in mdsc_show()

xattr.c
    530  p = rb_next(p);  in __copy_xattr_names()
    548  p = rb_next(tmp);  in __ceph_destroy_xattrs()
    701  p = rb_next(p);  in __ceph_build_xattrs_blob()

inode.c
    324  if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))  in ceph_fill_fragtree()
    346  rb_node = rb_next(rb_node);  in ceph_fill_fragtree()
    349  rb_node = rb_next(rb_node);  in ceph_fill_fragtree()
    364  rb_node = rb_next(rb_node);  in ceph_fill_fragtree()

mds_client.c
    1043  p = rb_next(p);  in cleanup_session_requests()
    2210  p = rb_next(p);  in kick_requests()
    2702  p = rb_next(p);  in replay_unsafe_requests()
    2930  for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {  in send_mds_reconnect()
    3483  n = rb_next(&req->r_node);  in wait_unsafe_requests()

/linux-4.1.27/drivers/mtd/ubi/ |
fastmap-wl.c
    358  e = rb_entry(rb_next(root->rb_node),  in may_reserve_for_fm()

ubi.h
    982  rb = rb_next(rb), \

fastmap.c
    422  for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {  in unmap_peb()
    426  node2 = rb_next(node2)) {  in unmap_peb()

/linux-4.1.27/Documentation/ |
rbtree.txt
    173  struct rb_node *rb_next(struct rb_node *node);
    179  node by calling rb_next() or rb_prev() on the current node. This will return
    190  for (node = rb_first(&mytree); node; node = rb_next(node))

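The rbtree.txt hits quote the canonical traversal idiom that most entries in this index instantiate. Spelled out around the documentation's own example names (struct mytype, keystring, and mytree come from rbtree.txt; the wrapper function is illustrative):

```c
#include <linux/kernel.h>
#include <linux/rbtree.h>

struct mytype {
	struct rb_node node;
	char *keystring;
};

/* Visit every node of the tree in ascending key order. */
static void print_tree(struct rb_root *mytree)
{
	struct rb_node *node;

	for (node = rb_first(mytree); node; node = rb_next(node))
		printk("key=%s\n",
		       rb_entry(node, struct mytype, node)->keystring);
}
```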
/linux-4.1.27/fs/dlm/ |
recover.c
    906  for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {  in dlm_create_root_list()
    943  next = rb_next(n);  in dlm_clear_toss()

debug_fs.c
    457  for (node = rb_first(tree); node; node = rb_next(node)) {  in table_seq_start()
    521  next = rb_next(&rp->res_hashnode);  in table_seq_next()

lock.c
    1094  for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {  in dlm_dump_rsb_hash()
    1671  next = rb_next(n);  in shrink_bucket()
    5476  for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {  in find_grant_rsb()

/linux-4.1.27/drivers/android/ |
binder.c
    1054  for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {  in binder_get_ref_for_node()
    2993  for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {  in binder_deferred_flush()
    3368  for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))  in print_binder_proc()
    3371  for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {  in print_binder_proc()
    3380  n = rb_next(n))  in print_binder_proc()
    3384  for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))  in print_binder_proc()
    3491  for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))  in print_binder_proc_stats()
    3500  for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))  in print_binder_proc_stats()
    3506  for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {  in print_binder_proc_stats()
    3516  for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))  in print_binder_proc_stats()

/linux-4.1.27/kernel/locking/ |
rtmutex.c
    199  lock->waiters_leftmost = rb_next(&waiter->tree_entry);  in rt_mutex_dequeue()
    238  task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);  in rt_mutex_dequeue_pi()

/linux-4.1.27/fs/ |
eventpoll.c
    756   for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {  in ep_free()
    879   for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {  in ep_show_fdinfo()
    1691  for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {  in ep_loop_check_proc()

/linux-4.1.27/drivers/block/xen-blkback/ |
blkback.c
    178  (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
    181  (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
    294  !rb_next(&persistent_gnt->node)) {  in free_persistent_gnts()

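blkback.c's macro (lines 178/181) fetches the successor before its loop body runs, because the body may erase the current node and rb_erase() invalidates the erased node's links, after which rb_next() on it is undefined. The same erase-safe pattern written out longhand, as a hedged sketch (drain_tree() is an illustrative helper, not kernel code):

```c
#include <linux/rbtree.h>

/*
 * Erase every node in @root. The successor must be fetched
 * *before* rb_erase() tears down the current node's links.
 */
static void drain_tree(struct rb_root *root)
{
	struct rb_node *node, *next;

	for (node = rb_first(root); node; node = next) {
		next = rb_next(node);
		rb_erase(node, root);
		RB_CLEAR_NODE(node);	/* mark the node as unlinked */
	}
}
```

Many other hits in this index (symbols__delete, regcache_rbtree_exit, machines__destroy_kernel_maps, dlm_clear_toss, …) are instances of this same next-before-erase loop.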
/linux-4.1.27/kernel/sched/ |
deadline.c
    196   next_node = rb_next(&p->pushable_dl_tasks);  in dequeue_pushable_dl_task()
    862   next_node = rb_next(&dl_se->rb_node);  in __dequeue_dl_entity()
    1219  next_node = rb_next(next_node);  in pick_next_earliest_dl_task()

fair.c
    532  next_node = rb_next(&se->run_node);  in __dequeue_entity()
    551  struct rb_node *next = rb_next(&se->run_node);  in __pick_next_entity()

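The scheduler hits (fair.c, deadline.c) and the rtmutex.c ones above use rb_next() for leftmost caching: when the node being removed is the cached leftmost, its in-order successor becomes the new leftmost, so rb_first() need not be recomputed on every pick. A hedged sketch of the idiom with made-up names (the real code keeps the cache in cfs_rq/dl_rq/lock fields):

```c
#include <linux/rbtree.h>

struct runqueue {
	struct rb_root tree;
	struct rb_node *leftmost;	/* cached rb_first(&tree) */
};

/* Remove @node while keeping the cached leftmost pointer valid. */
static void runqueue_erase(struct runqueue *rq, struct rb_node *node)
{
	if (rq->leftmost == node)
		rq->leftmost = rb_next(node);
	rb_erase(node, &rq->tree);
}
```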
/linux-4.1.27/block/ |
deadline-iosched.c
    68   struct rb_node *node = rb_next(&rq->rb_node);  in deadline_latter_request()

elevator.c
    1040  struct rb_node *rbnext = rb_next(&rq->rb_node);  in elv_rb_latter_request()

cfq-iosched.c
    1211  struct rb_node *rbnext = rb_next(&last->rb_node);  in cfq_find_next_rq()
    2614  node = rb_next(&__cfqq->p_node);  in cfqq_close()

/linux-4.1.27/net/rxrpc/ |
ar-connevent.c
    38   for (p = rb_first(&conn->calls); p; p = rb_next(p)) {  in rxrpc_abort_calls()

ar-connection.c
    288  parent = rb_next(parent);  in rxrpc_assign_connection_id()

ar-call.c
    790  for (p = rb_first(&rx->calls); p; p = rb_next(p)) {  in rxrpc_release_calls_on_socket()

/linux-4.1.27/fs/ext3/ |
balloc.c
    243   n = rb_next(n);  in __rsv_window_dump()
    1026  next = rb_next(&rsv->rsv_node);  in find_next_reservable_window()
    1270  next = rb_next(&my_rsv->rsv_node);  in try_to_extend_reservation()

dir.c
    499  info->curr_node = rb_next(info->curr_node);  in ext3_dx_readdir()

/linux-4.1.27/fs/nfs/ |
nfs4state.c
    179   pos = rb_next(pos)) {  in nfs4_get_renew_cred_server_locked()
    1562  pos = rb_next(pos)) {  in nfs4_reset_seqids()
    1612  pos = rb_next(pos)) {  in nfs4_clear_reclaim_server()
    1723  pos = rb_next(pos)) {  in nfs4_do_reclaim()

/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
cm.c
    454  nd = rb_next(nd);  in mlx4_ib_cm_paravirt_clean()

mcg.c
    1087  for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))  in _mlx4_ib_mcg_port_cleanup()
    1232  for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {  in clean_vf_mcast()

/linux-4.1.27/fs/ubifs/ |
recovery.c
    1527  this = rb_next(this);  in ubifs_recover_size()
    1541  this = rb_next(this);  in ubifs_recover_size()

log.c
    313  p = rb_next(p);  in remove_buds()

debug.c
    622   for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {  in ubifs_dump_budg()
    710   for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {  in ubifs_dump_lprop()
    2155  this = rb_next(this);  in check_inodes()

/linux-4.1.27/fs/nfs/blocklayout/ |
extent_tree.c
    34   struct rb_node *node = rb_next(&be->be_node);  in ext_tree_next()

/linux-4.1.27/drivers/md/bcache/ |
util.h
    555  container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)

/linux-4.1.27/arch/blackfin/kernel/ |
trace.c
    128  for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {  in decode_address()

/linux-4.1.27/net/802/ |
garp.c
    389  next = node ? rb_next(node) : NULL, node != NULL;  in garp_gid_event()

mrp.c
    575  next = node ? rb_next(node) : NULL, node != NULL;  in mrp_mad_event()

/linux-4.1.27/mm/ |
nommu.c
    600   while ((p = rb_next(lastp))) {  in validate_nommu_regions()
    1348  for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {  in do_mmap_pgoff()

mempolicy.c
    2370  struct rb_node *next = rb_next(&n->nd);  in shared_policy_replace()
    2507  next = rb_next(&n->nd);  in mpol_free_shared_policy()

mmap.c
    383  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in browse_rb()
    426  for (nd = rb_first(root); nd; nd = rb_next(nd)) {  in validate_mm_rb()

ksm.c
    2012  node = rb_next(node);  in ksm_check_stable_tree()

vmalloc.c
    2317  *pnext = node_to_va(rb_next(&(*pprev)->rb_node));  in pvm_find_next_prev()

/linux-4.1.27/net/sched/ |
sch_hfsc.c
    232  for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {  in eltree_get_mindl()
    296  for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {  in vttree_firstfit()

sch_htb.c
    329  *n = rb_next(*n);  in htb_next_rb_node()

/linux-4.1.27/drivers/vfio/ |
vfio_iommu_type1.c
    656  for (; n; n = rb_next(n)) {  in vfio_iommu_replay()

/linux-4.1.27/drivers/infiniband/core/ |
multicast.c
    762  for (node = rb_first(&port->table); node; node = rb_next(node)) {  in mcast_groups_event()

/linux-4.1.27/fs/f2fs/ |
data.c
    493  node = rb_next(&en->rb_node);  in __try_front_merge()
    553  next = rb_next(node);  in __free_extent_tree()

/linux-4.1.27/drivers/infiniband/ulp/ipoib/ |
ipoib_multicast.c
    979  n = rb_next(n);  in ipoib_mcast_iter_next()

ipoib_main.c
    370  n = rb_next(n);  in ipoib_path_iter_next()

/linux-4.1.27/fs/gfs2/ |
rgrp.c
    567   n = rb_next(&rgd->rd_node);  in gfs2_rgrpd_get_next()
    2216  for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {  in gfs2_rgrp_dump()

/linux-4.1.27/drivers/mtd/ |
mtdswap.c
    259  p = rb_next(p);  in mtdswap_rb_index()

/linux-4.1.27/kernel/events/ |
uprobes.c
    1044  for (t = n; (t = rb_next(t)); )  in build_probe_list()

/linux-4.1.27/fs/fuse/ |
dev.c
    2158  p = rb_next(p);  in end_polls()

/linux-4.1.27/drivers/block/ |
pktcdvd.c
    624  struct rb_node *n = rb_next(&node->rb_node);  in pkt_rbtree_next()

/linux-4.1.27/drivers/staging/lustre/lustre/osc/ |
osc_cache.c
    155  return rb_extent(rb_next(&ext->oe_node));  in next_extent()

/linux-4.1.27/fs/cifs/ |
connect.c
    4117  node = rb_next(tmp);  in cifs_prune_tlinks()

/linux-4.1.27/drivers/md/ |
dm-thin.c
    1741  for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {  in __extract_sorted_bios()
|