rb_first 103 arch/powerpc/kernel/eeh_cache.c n = rb_first(&cache->rb_root);
rb_first 226 arch/powerpc/kernel/eeh_cache.c n = rb_first(&pci_io_addr_cache_root.rb_root);
rb_first 277 arch/powerpc/kernel/eeh_cache.c for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
rb_first 254 arch/x86/mm/pat_rbtree.c node = rb_first(&memtype_rbroot);
rb_first 817 block/bfq-cgroup.c child_entity = bfq_entity_of(rb_first(child_active));
rb_first 841 block/bfq-cgroup.c while ((entity = bfq_entity_of(rb_first(active))))
rb_first 909 block/bfq-iosched.c rbnext = rb_first(&bfqq->sort_list);
rb_first 953 drivers/android/binder.c for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
rb_first 1468 drivers/android/binder.c for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
rb_first 4983 drivers/android/binder.c for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
rb_first 5311 drivers/android/binder.c for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
rb_first 5444 drivers/android/binder.c while ((n = rb_first(&proc->threads))) {
rb_first 5456 drivers/android/binder.c while ((n = rb_first(&proc->nodes))) {
rb_first 5476 drivers/android/binder.c while ((n = rb_first(&proc->refs_by_desc))) {
rb_first 5720 drivers/android/binder.c for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
rb_first 5724 drivers/android/binder.c for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
rb_first 5752 drivers/android/binder.c for (n = rb_first(&proc->refs_by_desc);
rb_first 5884 drivers/android/binder.c for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
rb_first 5898 drivers/android/binder.c for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
rb_first 5906 drivers/android/binder.c for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
rb_first 415 drivers/android/binder_alloc.c for (n = rb_first(&alloc->allocated_buffers); n != NULL;
rb_first 424 drivers/android/binder_alloc.c for (n = rb_first(&alloc->free_buffers); n != NULL;
rb_first 750 drivers/android/binder_alloc.c while ((n = rb_first(&alloc->allocated_buffers))) {
rb_first 826 drivers/android/binder_alloc.c for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
rb_first 879 drivers/android/binder_alloc.c for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
rb_first 148 drivers/base/regmap/regcache-rbtree.c for (node = rb_first(&rbtree_ctx->root); node != NULL;
rb_first 225 drivers/base/regmap/regcache-rbtree.c next = rb_first(&rbtree_ctx->root);
rb_first 476 drivers/base/regmap/regcache-rbtree.c for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rb_first 516 drivers/base/regmap/regcache-rbtree.c for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rb_first 623 drivers/base/regmap/regmap-debugfs.c next = rb_first(&map->range_tree);
rb_first 581 drivers/base/regmap/regmap.c next = rb_first(&map->range_tree);
rb_first 1184 drivers/block/pktcdvd.c n = rb_first(&pd->bio_queue);
rb_first 1202 drivers/block/pktcdvd.c n = rb_first(&pd->bio_queue);
rb_first 213 drivers/block/xen-blkback/blkback.c for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
rb_first 215 drivers/fpga/dfl-afu-dma-region.c struct rb_node *node = rb_first(&afu->dma_regions);
rb_first 321 drivers/gpu/drm/i915/gvt/kvmgt.c node = rb_first(&vgpu->vdev.gfn_cache);
rb_first 1809 drivers/gpu/drm/i915/i915_drv.h for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
rb_first 52 drivers/gpu/drm/i915/i915_scheduler.c rb_first(&execlists->queue.rb_root));
rb_first 986 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c while ((node = rb_first(&vmm->root))) {
rb_first 778 drivers/infiniband/core/multicast.c for (node = rb_first(&port->table); node; node = rb_next(node)) {
rb_first 408 drivers/infiniband/hw/mlx4/cm.c while (rb_first(sl_id_map)) {
rb_first 410 drivers/infiniband/hw/mlx4/cm.c rb_entry(rb_first(sl_id_map),
rb_first 419 drivers/infiniband/hw/mlx4/cm.c nd = rb_first(sl_id_map);
rb_first 1088 drivers/infiniband/hw/mlx4/mcg.c for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
rb_first 1102 drivers/infiniband/hw/mlx4/mcg.c while ((p = rb_first(&ctx->mcg_table)) != NULL) {
rb_first 1231 drivers/infiniband/hw/mlx4/mcg.c for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
rb_first 657 drivers/infiniband/ulp/ipoib/ipoib_main.c n = rb_first(&priv->path_tree);
rb_first 1025 drivers/infiniband/ulp/ipoib/ipoib_multicast.c n = rb_first(&priv->multicast_tree);
rb_first 674 drivers/iommu/iova.c for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
rb_first 712 drivers/iommu/iova.c for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
rb_first 534 drivers/md/bcache/util.h container_of_or_null(rb_first(root), type, member)
rb_first 1554 drivers/md/dm-crypt.c io = crypt_io_from_node(rb_first(&write_tree));
rb_first 230 drivers/md/dm-dust.c nnode = rb_first(tree);
rb_first 1750 drivers/md/dm-snap.c next = rb_first(&s->out_of_order_tree);
rb_first 2140 drivers/md/dm-thin.c for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
rb_first 646 drivers/md/dm-writecache.c next = rb_first(&wc->freetree);
rb_first 1643 drivers/md/dm-writecache.c e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
rb_first 79 drivers/mtd/mtdswap.c #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
rb_first 241 drivers/mtd/mtdswap.c p = rb_first(root);
rb_first 576 drivers/mtd/mtdswap.c eb = rb_entry(rb_first(clean_root), struct swap_eb, rb);
rb_first 861 drivers/mtd/mtdswap.c eb = rb_entry(rb_first(rp), struct swap_eb, rb);
rb_first 1039 drivers/mtd/ubi/ubi.h for (rb = rb_first(root), \
rb_first 325 drivers/mtd/ubi/wl.c e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
rb_first 366 drivers/mtd/ubi/wl.c first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
rb_first 731 drivers/mtd/ubi/wl.c e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
rb_first 752 drivers/mtd/ubi/wl.c e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
rb_first 1036 drivers/mtd/ubi/wl.c e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
rb_first 5035 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c for (p = rb_first(root); p; p = rb_next(p)) {
rb_first 358 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c p = rb_first(&dev->priv.page_root);
rb_first 529 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c p = rb_first(&dev->priv.page_root);
rb_first 1197 drivers/vfio/vfio_iommu_type1.c n = rb_first(&iommu->dma_list);
rb_first 1858 drivers/vfio/vfio_iommu_type1.c while ((node = rb_first(&iommu->dma_list)))
rb_first 1866 drivers/vfio/vfio_iommu_type1.c n = rb_first(&iommu->dma_list);
rb_first 1873 drivers/vfio/vfio_iommu_type1.c p = rb_first(&dma->pfn_list);
rb_first 1889 drivers/vfio/vfio_iommu_type1.c n = rb_first(&iommu->dma_list);
rb_first 769 fs/afs/cell.c for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
rb_first 453 fs/afs/server.c for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
rb_first 379 fs/btrfs/ctree.c for (node = rb_first(tm_root); node; node = next) {
rb_first 2087 fs/btrfs/extent-tree.c n = rb_first(root);
rb_first 276 fs/btrfs/extent_io.c node = rb_first(&tree->state);
rb_first 257 fs/btrfs/file.c node = rb_first(&fs_info->defrag_inodes);
rb_first 265 fs/btrfs/file.c node = rb_first(&fs_info->defrag_inodes);
rb_first 644 fs/btrfs/free-space-cache.c for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
rb_first 929 fs/btrfs/free-space-cache.c struct rb_node *node = rb_first(&ctl->free_space_offset);
rb_first 942 fs/btrfs/free-space-cache.c node = rb_first(&cluster->root);
rb_first 964 fs/btrfs/free-space-cache.c node = rb_first(&cluster->root);
rb_first 2080 fs/btrfs/free-space-cache.c node = rb_first(&cluster->root);
rb_first 2497 fs/btrfs/free-space-cache.c for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
rb_first 2556 fs/btrfs/free-space-cache.c node = rb_first(&cluster->root);
rb_first 2766 fs/btrfs/free-space-cache.c node = rb_first(&cluster->root);
rb_first 3444 fs/btrfs/free-space-cache.c entry = rb_entry(rb_first(&ctl->free_space_offset),
rb_first 1160 fs/btrfs/free-space-tree.c node = rb_first(&fs_info->block_group_cache_tree);
rb_first 265 fs/btrfs/inode-map.c n = rb_first(rbroot);
rb_first 2946 fs/btrfs/inode.c node = rb_first(&new->root);
rb_first 5475 fs/btrfs/inode.c node = rb_first(&io_tree->state);
rb_first 10780 fs/btrfs/inode.c node = rb_first(&fs_info->swapfile_pins);
rb_first 519 fs/btrfs/qgroup.c while ((n = rb_first(&fs_info->qgroup_tree))) {
rb_first 2520 fs/btrfs/qgroup.c while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
rb_first 3299 fs/btrfs/qgroup.c for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
rb_first 239 fs/btrfs/ref-verify.c while ((n = rb_first(&be->roots))) {
rb_first 245 fs/btrfs/ref-verify.c while((n = rb_first(&be->refs))) {
rb_first 643 fs/btrfs/ref-verify.c for (n = rb_first(&be->refs); n; n = rb_next(n)) {
rb_first 651 fs/btrfs/ref-verify.c for (n = rb_first(&be->roots); n; n = rb_next(n)) {
rb_first 903 fs/btrfs/ref-verify.c while ((n = rb_first(&fs_info->block_tree))) {
rb_first 2588 fs/btrfs/relocation.c while ((rb_node = rb_first(blocks))) {
rb_first 7312 fs/btrfs/send.c n = rb_first(&sctx->pending_dir_moves);
rb_first 7329 fs/btrfs/send.c n = rb_first(&sctx->waiting_dir_moves);
rb_first 7340 fs/btrfs/send.c n = rb_first(&sctx->orphan_dirs);
rb_first 2059 fs/ceph/addr.c n = rb_first(&mdsc->pool_perm_tree);
rb_first 798 fs/ceph/caps.c for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
rb_first 829 fs/ceph/caps.c for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
rb_first 879 fs/ceph/caps.c for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
rb_first 905 fs/ceph/caps.c for (q = rb_first(&ci->i_caps); q != p;
rb_first 930 fs/ceph/caps.c for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
rb_first 994 fs/ceph/caps.c for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
rb_first 1011 fs/ceph/caps.c return rb_first(&ci->i_caps) == rb_last(&ci->i_caps);
rb_first 1247 fs/ceph/caps.c p = rb_first(&ci->i_caps);
rb_first 1923 fs/ceph/caps.c for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
rb_first 60 fs/ceph/debugfs.c for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {
rb_first 340 fs/ceph/inode.c rb_node = rb_first(&ci->i_fragtree);
rb_first 359 fs/ceph/inode.c rb_node = rb_first(&ci->i_fragtree);
rb_first 573 fs/ceph/inode.c while ((n = rb_first(&ci->i_fragtree)) != NULL) {
rb_first 1003 fs/ceph/mds_client.c cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
rb_first 1297 fs/ceph/mds_client.c p = rb_first(&mdsc->request_tree);
rb_first 2110 fs/ceph/mds_client.c return rb_entry(rb_first(&mdsc->request_tree),
rb_first 2647 fs/ceph/mds_client.c struct rb_node *p = rb_first(&mdsc->request_tree);
rb_first 3231 fs/ceph/mds_client.c p = rb_first(&mdsc->request_tree);
rb_first 3520 fs/ceph/mds_client.c for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
rb_first 185 fs/ceph/quota.c node = rb_first(&mdsc->quotarealms_inodes);
rb_first 1142 fs/ceph/snap.c while ((p = rb_first(&mdsc->snapid_map_tree))) {
rb_first 603 fs/ceph/xattr.c p = rb_first(&ci->i_xattrs.index);
rb_first 626 fs/ceph/xattr.c p = rb_first(&ci->i_xattrs.index);
rb_first 775 fs/ceph/xattr.c p = rb_first(&ci->i_xattrs.index);
rb_first 5173 fs/cifs/connect.c while ((node = rb_first(root))) {
rb_first 5485 fs/cifs/connect.c node = rb_first(root);
rb_first 454 fs/dlm/debug_fs.c for (node = rb_first(tree); node; node = rb_next(node)) {
rb_first 486 fs/dlm/debug_fs.c node = rb_first(tree);
rb_first 551 fs/dlm/debug_fs.c next = rb_first(tree);
rb_first 1091 fs/dlm/lock.c for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
rb_first 1667 fs/dlm/lock.c for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
rb_first 5477 fs/dlm/lock.c for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
rb_first 822 fs/dlm/lockspace.c while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) {
rb_first 828 fs/dlm/lockspace.c while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) {
rb_first 908 fs/dlm/recover.c for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
rb_first 944 fs/dlm/recover.c for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
rb_first 209 fs/ext2/balloc.c n = rb_first(root);
rb_first 139 fs/ext4/block_validity.c node = rb_first(&sbi->system_blks->root);
rb_first 570 fs/ext4/dir.c info->curr_node = rb_first(&info->root);
rb_first 592 fs/ext4/dir.c info->curr_node = rb_first(&info->root);
rb_first 185 fs/ext4/extents_status.c node = rb_first(&tree->root);
rb_first 1755 fs/ext4/extents_status.c node = rb_first(&tree->root);
rb_first 1777 fs/ext4/extents_status.c node = rb_first(&tree->root);
rb_first 3521 fs/ext4/mballoc.c n = rb_first(&(grp->bb_free_root));
rb_first 2059 fs/fuse/dev.c p = rb_first(&fc->polled_files);
rb_first 552 fs/gfs2/rgrp.c n = rb_first(&sdp->sd_rindex_tree);
rb_first 574 fs/gfs2/rgrp.c n = rb_first(&sdp->sd_rindex_tree);
rb_first 713 fs/gfs2/rgrp.c while ((n = rb_first(&rgd->rd_rstree))) {
rb_first 726 fs/gfs2/rgrp.c while ((n = rb_first(&sdp->sd_rindex_tree))) {
rb_first 2278 fs/gfs2/rgrp.c for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
rb_first 329 fs/jffs2/nodelist.h struct rb_node *node = rb_first(root);
rb_first 361 fs/jffs2/nodelist.h #define tn_first(list) rb_entry(rb_first(list), struct jffs2_tmp_dnode_info, rb)
rb_first 1201 fs/kernfs/dir.c rbn = rb_first(&pos->dir.children);
rb_first 21 fs/nfs/blocklayout/extent_tree.c struct rb_node *node = rb_first(root);
rb_first 2272 fs/nfs/dir.c while ((n = rb_first(root_node)) != NULL) {
rb_first 210 fs/nfs/nfs4state.c for (pos = rb_first(&server->state_owners);
rb_first 1717 fs/nfs/nfs4state.c for (pos = rb_first(&server->state_owners);
rb_first 1768 fs/nfs/nfs4state.c for (pos = rb_first(&server->state_owners);
rb_first 1876 fs/nfs/nfs4state.c for (pos = rb_first(&server->state_owners);
rb_first 88 fs/ocfs2/reservations.c node = rb_first(&resmap->m_reservations);
rb_first 143 fs/ocfs2/reservations.c node = rb_first(&resmap->m_reservations);
rb_first 370 fs/ocfs2/reservations.c node = rb_first(&resmap->m_reservations);
rb_first 529 fs/ocfs2/reservations.c next = rb_first(root);
rb_first 58 fs/proc/generic.c return rb_entry_safe(rb_first(&dir->subdir), struct proc_dir_entry,
rb_first 88 fs/proc/nommu.c for (p = rb_first(&nommu_region_tree); p; p = rb_next(p))
rb_first 404 fs/proc/proc_sysctl.c ctl_node = first_usable_entry(rb_first(&dir->root));
rb_first 29 fs/proc/task_nommu.c for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
rb_first 90 fs/proc/task_nommu.c for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
rb_first 108 fs/proc/task_nommu.c for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
rb_first 220 fs/proc/task_nommu.c for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
rb_first 620 fs/ubifs/debug.c for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
rb_first 708 fs/ubifs/debug.c for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
rb_first 2148 fs/ubifs/debug.c struct rb_node *this = rb_first(&fsckd->inodes);
rb_first 304 fs/ubifs/log.c p = rb_first(&c->buds);
rb_first 1530 fs/ubifs/recovery.c struct rb_node *this = rb_first(&c->size_tree);
rb_first 56 include/linux/rbtree.h extern struct rb_node *rb_first(const struct rb_root *);
rb_first 3399 include/linux/skbuff.h #define skb_rb_first(root) rb_to_skb(rb_first(root))
rb_first 1660 kernel/events/core.c for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
rb_first 3799 kernel/events/core.c event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree),
rb_first 47 kernel/power/wakelock.c for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
rb_first 190 kernel/trace/trace_stat.c node = rb_first(&session->stat_root);
rb_first 205 kernel/trace/trace_stat.c return rb_first(&session->stat_root);
rb_first 477 lib/rbtree.c EXPORT_SYMBOL(rb_first);
rb_first 197 lib/rbtree_test.c for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
rb_first 223 lib/rbtree_test.c for (rb = rb_first(&root.rb_root); rb; rb = rb_next(rb)) {
rb_first 295 lib/rbtree_test.c for (node = rb_first(&root.rb_root); node; node = rb_next(node))
rb_first 309 lib/rbtree_test.c node = rb_first(&root.rb_root);
rb_first 778 mm/backing-dev.c while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
rb_first 2773 mm/ksm.c node = rb_first(root_stable_tree + nid);
rb_first 2780 mm/ksm.c node = rb_first(root_stable_tree + nid);
rb_first 2652 mm/mempolicy.c next = rb_first(&p->root);
rb_first 333 mm/mmap.c for (nd = rb_first(root); nd; nd = rb_next(nd)) {
rb_first 378 mm/mmap.c for (nd = rb_first(root); nd; nd = rb_next(nd)) {
rb_first 461 mm/nommu.c lastp = rb_first(&nommu_region_tree);
rb_first 1169 mm/nommu.c for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
rb_first 157 mm/swapfile.c struct rb_node *rb = rb_first(&sis->swap_extent_root);
rb_first 385 net/802/garp.c for (node = rb_first(&app->gid);
rb_first 570 net/802/mrp.c for (node = rb_first(&app->mad);
rb_first 771 net/ceph/auth_x.c while ((p = rb_first(&xi->ticket_handlers)) != NULL) {
rb_first 69 net/ceph/debugfs.c for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) {
rb_first 90 net/ceph/debugfs.c for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) {
rb_first 101 net/ceph/debugfs.c for (n = rb_first(&map->primary_temp); n; n = rb_next(n)) {
rb_first 108 net/ceph/debugfs.c for (n = rb_first(&map->pg_upmap); n; n = rb_next(n)) {
rb_first 119 net/ceph/debugfs.c for (n = rb_first(&map->pg_upmap_items); n; n = rb_next(n)) {
rb_first 158 net/ceph/debugfs.c for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
rb_first 234 net/ceph/debugfs.c for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
rb_first 260 net/ceph/debugfs.c for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
rb_first 321 net/ceph/debugfs.c for (n = rb_first(&osd->o_backoffs_by_id); n; n = rb_next(n)) {
rb_first 347 net/ceph/debugfs.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
rb_first 355 net/ceph/debugfs.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
rb_first 363 net/ceph/debugfs.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
rb_first 960 net/ceph/mon_client.c for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
rb_first 1158 net/ceph/osd_client.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
rb_first 1161 net/ceph/osd_client.c for (p = rb_first(&osd->o_requests); p; ) {
rb_first 1171 net/ceph/osd_client.c for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
rb_first 1316 net/ceph/osd_client.c for (n = rb_first(&osd->o_requests); n; ) {
rb_first 1326 net/ceph/osd_client.c for (n = rb_first(&osd->o_linger_requests); n; ) {
rb_first 1367 net/ceph/osd_client.c for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
rb_first 1459 net/ceph/osd_client.c for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
rb_first 1873 net/ceph/osd_client.c rb_entry(rb_first(&osd->o_backoff_mappings),
rb_first 1878 net/ceph/osd_client.c rb_entry(rb_first(&spg->backoffs),
rb_first 3288 net/ceph/osd_client.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
rb_first 3292 net/ceph/osd_client.c for (p = rb_first(&osd->o_requests); p; ) {
rb_first 3310 net/ceph/osd_client.c for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
rb_first 3329 net/ceph/osd_client.c for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
rb_first 3708 net/ceph/osd_client.c for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
rb_first 3761 net/ceph/osd_client.c for (n = rb_first(&osd->o_linger_requests); n; ) {
rb_first 3797 net/ceph/osd_client.c for (n = rb_first(&osd->o_requests); n; ) {
rb_first 3855 net/ceph/osd_client.c for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
rb_first 3881 net/ceph/osd_client.c for (n = rb_first(&osdc->osds); n; ) {
rb_first 3907 net/ceph/osd_client.c for (n = rb_first(need_resend); n; ) {
rb_first 3922 net/ceph/osd_client.c for (n = rb_first(need_resend); n; ) {
rb_first 4074 net/ceph/osd_client.c for (n = rb_first(&osd->o_requests); n; ) {
rb_first 4087 net/ceph/osd_client.c for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
rb_first 4313 net/ceph/osd_client.c for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
rb_first 4544 net/ceph/osd_client.c for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
rb_first 4548 net/ceph/osd_client.c for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
rb_first 5120 net/ceph/osd_client.c for (n = rb_first(&osdc->osds); n; ) {
rb_first 5212 net/ceph/osd_client.c struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
rb_first 180 net/ceph/osdmap.c rb_entry(rb_first(&c->choose_args),
rb_first 703 net/ceph/osdmap.c for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
rb_first 953 net/ceph/osdmap.c rb_entry(rb_first(&map->pg_temp),
rb_first 960 net/ceph/osdmap.c rb_entry(rb_first(&map->primary_temp),
rb_first 967 net/ceph/osdmap.c rb_entry(rb_first(&map->pg_upmap),
rb_first 974 net/ceph/osdmap.c rb_entry(rb_first(&map->pg_upmap_items),
rb_first 981 net/ceph/osdmap.c rb_entry(rb_first(&map->pg_pools),
rb_first 3094 net/core/skbuff.c struct rb_node *p = rb_first(root);
rb_first 234 net/ipv4/inet_fragment.c struct rb_node *p = rb_first(root);
rb_first 298 net/ipv4/inetpeer.c struct rb_node *p = rb_first(&base->rb_root);
rb_first 1091 net/ipv4/nexthop.c while ((node = rb_first(root))) {
rb_first 1731 net/ipv4/nexthop.c for (node = rb_first(root); node; node = rb_next(node)) {
rb_first 2532 net/ipv4/tcp.c struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
rb_first 4486 net/ipv4/tcp_input.c p = rb_first(&tp->out_of_order_queue);
rb_first 457 net/netfilter/nf_conncount.c for (node = rb_first(root); node != NULL; node = rb_next(node)) {
rb_first 472 net/netfilter/nf_conncount.c node = rb_first(root);
rb_first 564 net/netfilter/nf_conncount.c while ((node = rb_first(r)) != NULL) {
rb_first 362 net/netfilter/nft_set_rbtree.c for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
rb_first 398 net/netfilter/nft_set_rbtree.c for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
rb_first 405 net/rds/cong.c while ((node = rb_first(&rds_cong_tree))) {
rb_first 136 net/rds/rdma.c while ((node = rb_first(&rs->rs_rdma_keys))) {
rb_first 471 net/sched/sch_fq.c while ((p = rb_first(&q->delayed)) != NULL) {
rb_first 604 net/sched/sch_fq.c struct rb_node *p = rb_first(&flow->t_root);
rb_first 636 net/sched/sch_fq.c while ((p = rb_first(root)) != NULL) {
rb_first 665 net/sched/sch_fq.c while ((op = rb_first(oroot)) != NULL) {
rb_first 224 net/sched/sch_hfsc.c for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
rb_first 240 net/sched/sch_hfsc.c n = rb_first(&q->eligible);
rb_first 288 net/sched/sch_hfsc.c for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
rb_first 653 net/sched/sch_hfsc.c struct rb_node *n = rb_first(&cl->cf_tree);
rb_first 713 net/sched/sch_htb.c struct rb_node *p = rb_first(wait_pq);
rb_first 363 net/sched/sch_netem.c struct rb_node *p = rb_first(&q->t_root);
rb_first 473 net/smc/smc_core.c node = rb_first(&lgr->conns_all);
rb_first 485 net/smc/smc_core.c node = rb_first(&lgr->conns_all);
rb_first 268 net/tipc/group.c for (n = rb_first(&grp->members); n; n = rb_next(n)) {
rb_first 395 net/tipc/group.c for (n = rb_first(&grp->members); n; n = rb_next(n)) {
rb_first 305 net/tipc/name_table.c for (n = rb_first(&service->ranges); n; n = rb_next(n)) {
rb_first 529 net/tipc/name_table.c for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
rb_first 564 net/tipc/name_table.c for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
rb_first 595 net/tipc/name_table.c for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
rb_first 881 net/tipc/name_table.c for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
rb_first 959 net/xfrm/xfrm_policy.c while ((rnode = rb_first(&v->root)) != NULL) {
rb_first 1054 net/xfrm/xfrm_policy.c struct rb_node *rn = rb_first(r);
rb_first 78 security/apparmor/include/label.h for ((N) = rb_first(&(LS)->root); (N); (N) = rb_next(N))
rb_first 1960 security/apparmor/label.c for (node = rb_first(&ls->root); node; node = rb_first(&ls->root)) {
rb_first 209 security/keys/gc.c cursor = rb_first(&key_serial_tree);
rb_first 270 security/keys/proc.c struct rb_node *n = rb_first(r);
rb_first 55 tools/include/linux/rbtree.h extern struct rb_node *rb_first(const struct rb_root *);
rb_first 1001 tools/perf/builtin-kmem.c next = rb_first(root);
rb_first 1054 tools/perf/builtin-kmem.c struct rb_node *next = rb_first(&page_alloc_sorted);
rb_first 1103 tools/perf/builtin-kmem.c struct rb_node *next = rb_first(&page_caller_sorted);
rb_first 1290 tools/perf/builtin-kmem.c node = rb_first(root);
rb_first 1337 tools/perf/builtin-kmem.c node = rb_first(root);
rb_first 562 tools/perf/builtin-kvm.c struct rb_node *node = rb_first(result);
rb_first 771 tools/perf/builtin-lock.c node = rb_first(&thread_stats);
rb_first 731 tools/perf/builtin-report.c for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
rb_first 203 tools/perf/tests/hists_cumulate.c for (node = rb_first(root), i = 0;
rb_first 225 tools/perf/tests/hists_cumulate.c cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);
rb_first 145 tools/perf/ui/browser.c nd = rb_first(root);
rb_first 174 tools/perf/ui/browser.c browser->top = rb_first(browser->entries);
rb_first 723 tools/perf/ui/browsers/annotate.c nd = rb_first(&browser->entries);
rb_first 178 tools/perf/ui/browsers/hists.c for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
rb_first 261 tools/perf/ui/browsers/hists.c for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
rb_first 327 tools/perf/ui/browsers/hists.c struct rb_node *nd = rb_first(&node->rb_root);
rb_first 329 tools/perf/ui/browsers/hists.c for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
rb_first 366 tools/perf/ui/browsers/hists.c struct rb_node *nd = rb_first(root);
rb_first 369 tools/perf/ui/browsers/hists.c for (nd = rb_first(root); nd; nd = rb_next(nd)) {
rb_first 466 tools/perf/ui/browsers/hists.c for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
rb_first 507 tools/perf/ui/browsers/hists.c for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
rb_first 869 tools/perf/ui/browsers/hists.c node = rb_first(root);
rb_first 973 tools/perf/ui/browsers/hists.c node = rb_first(root);
rb_first 1062 tools/perf/ui/browsers/hists.c node = rb_first(root);
rb_first 1867 tools/perf/ui/browsers/hists.c nd = hists__filter_entries(rb_first(browser->entries),
rb_first 2097 tools/perf/ui/browsers/hists.c struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries),
rb_first 120 tools/perf/ui/browsers/map.c for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {
rb_first 101 tools/perf/ui/gtk/hists.c bool has_single_node = (rb_first(root) == rb_last(root));
rb_first 103 tools/perf/ui/gtk/hists.c for (nd = rb_first(root); nd; nd = rb_next(nd)) {
rb_first 165 tools/perf/ui/gtk/hists.c for (nd = rb_first(root); nd; nd = rb_next(nd)) {
rb_first 224 tools/perf/ui/gtk/hists.c bool has_single_node = (rb_first(root) == rb_last(root));
rb_first 226 tools/perf/ui/gtk/hists.c for (nd = rb_first(root); nd; nd = rb_next(nd)) {
rb_first 124 tools/perf/ui/stdio/hist.c node = rb_first(root);
rb_first 229 tools/perf/ui/stdio/hist.c node = rb_first(root);
rb_first 310 tools/perf/ui/stdio/hist.c struct rb_node *rb_node = rb_first(tree);
rb_first 361 tools/perf/ui/stdio/hist.c struct rb_node *rb_node = rb_first(tree);
rb_first 2189 tools/perf/util/annotate.c node = rb_first(src_root);
rb_first 2215 tools/perf/util/annotate.c node = rb_first(root);
rb_first 22 tools/perf/util/block-range.c for (rb = rb_first(&block_ranges.root); rb; rb = rb_next(rb)) {
rb_first 419 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in);
rb_first 450 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in);
rb_first 481 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in);
rb_first 546 tools/perf/util/callchain.c n = rb_first(&new->rb_root_in);
rb_first 1021 tools/perf/util/callchain.c n = rb_first(&src->rb_root_in);
rb_first 1261 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in);
rb_first 1472 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in);
rb_first 1497 tools/perf/util/callchain.c n = rb_first(&node->rb_root_in);
rb_first 757 tools/perf/util/dso.c struct rb_node *next = rb_first(root);
rb_first 135 tools/perf/util/env.c next = rb_first(root);
rb_first 149 tools/perf/util/env.c next = rb_first(root);
rb_first 931 tools/perf/util/header.c next = rb_first(root);
rb_first 981 tools/perf/util/header.c next = rb_first(root);
rb_first 1541 tools/perf/util/header.c next = rb_first(root);
rb_first 1565 tools/perf/util/header.c next = rb_first(root);
rb_first 598 tools/perf/util/map.c struct rb_node *next = rb_first(root);
rb_first 612 tools/perf/util/map.c struct rb_node *next = rb_first(root);
rb_first 694 tools/perf/util/map.c for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
rb_first 746 tools/perf/util/map.c for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
rb_first 1003 tools/perf/util/map.c struct rb_node *first = rb_first(&maps->entries);
rb_first 96 tools/perf/util/rb_resort.h for (nd = rb_first(entries); nd; nd = rb_next(nd)) { \
rb_first 128 tools/perf/util/rb_resort.h for (__nd = rb_first(&__name->entries); \
rb_first 946 tools/perf/util/symbol.c struct rb_node *next = rb_first(modules);
rb_first 1029 tools/perf/util/symbol.c from_node = rb_first(&from_modules);
rb_first 1030 tools/perf/util/symbol.c to_node = rb_first(&to_modules);
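Nearly all of the call sites above follow one of two idioms: a read-only in-order walk (for (n = rb_first(root); n; n = rb_next(n))) and a destructive drain (while ((n = rb_first(root))) followed by rb_erase()). A minimal sketch of both against the <linux/rbtree.h> API declared above; struct foo, foo_sum and foo_destroy are hypothetical illustration names, not taken from any of the files listed:

	#include <linux/rbtree.h>
	#include <linux/slab.h>

	struct foo {
		struct rb_node node;	/* rb_node embedded in the entry, as usual */
		int key;
	};

	/* Read-only in-order walk: rb_first()/rb_next(), rb_entry() to
	 * get from the rb_node back to the containing entry. */
	static int foo_sum(struct rb_root *root)
	{
		struct rb_node *n;
		int sum = 0;

		for (n = rb_first(root); n; n = rb_next(n))
			sum += rb_entry(n, struct foo, node)->key;
		return sum;
	}

	/* Destructive drain: repeatedly take the leftmost node, unlink it
	 * with rb_erase(), then free it, leaving an empty tree. */
	static void foo_destroy(struct rb_root *root)
	{
		struct rb_node *n;

		while ((n = rb_first(root))) {
			struct foo *f = rb_entry(n, struct foo, node);

			rb_erase(n, root);
			kfree(f);
		}
	}

Note that the drain loop rebalances on every rb_erase(); where whole-tree teardown cost matters, rbtree_postorder_for_each_entry_safe() from the same header avoids that, at the cost of visiting nodes in postorder rather than sorted order.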