new_entry 1743 arch/x86/kvm/svm.c u64 *entry, new_entry;
new_entry 1764 arch/x86/kvm/svm.c new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
new_entry 1767 arch/x86/kvm/svm.c WRITE_ONCE(*entry, new_entry);
new_entry 4615 arch/x86/kvm/svm.c u32 *entry, new_entry;
new_entry 4622 arch/x86/kvm/svm.c new_entry = READ_ONCE(*entry);
new_entry 4623 arch/x86/kvm/svm.c new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
new_entry 4624 arch/x86/kvm/svm.c new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
new_entry 4625 arch/x86/kvm/svm.c new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
new_entry 4626 arch/x86/kvm/svm.c WRITE_ONCE(*entry, new_entry);
new_entry 370 drivers/firmware/efi/efivars.c struct efivar_entry *new_entry;
new_entry 406 drivers/firmware/efi/efivars.c new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
new_entry 407 drivers/firmware/efi/efivars.c if (!new_entry)
new_entry 411 drivers/firmware/efi/efivars.c copy_out_compat(&new_entry->var, compat);
new_entry 413 drivers/firmware/efi/efivars.c memcpy(&new_entry->var, new_var, sizeof(*new_var));
new_entry 415 drivers/firmware/efi/efivars.c err = efivar_entry_set(new_entry, attributes, size,
new_entry 423 drivers/firmware/efi/efivars.c if (efivar_create_sysfs_entry(new_entry)) {
new_entry 425 drivers/firmware/efi/efivars.c kfree(new_entry);
new_entry 430 drivers/firmware/efi/efivars.c kfree(new_entry);
new_entry 3940 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c bool new_entry = true;
new_entry 3944 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c if (new_entry) {
new_entry 3945 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c new_entry = false;
new_entry 3952 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c new_entry = true;
new_entry 638 drivers/infiniband/hw/hfi1/affinity.c bool new_entry = false;
new_entry 669 drivers/infiniband/hw/hfi1/affinity.c new_entry = true;
new_entry 746 drivers/infiniband/hw/hfi1/affinity.c ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry);
new_entry 750 drivers/infiniband/hw/hfi1/affinity.c if (new_entry)
new_entry 758 drivers/infiniband/hw/hfi1/affinity.c if (new_entry)
new_entry 812 drivers/iommu/virtio-iommu.c struct iommu_resv_region *entry, *new_entry, *msi = NULL;
new_entry 821 drivers/iommu/virtio-iommu.c new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
new_entry 822 drivers/iommu/virtio-iommu.c if (!new_entry)
new_entry 824 drivers/iommu/virtio-iommu.c list_add_tail(&new_entry->list, head);
new_entry 1092 drivers/net/dsa/mt7530.c struct mt7530_hw_vlan_entry new_entry;
new_entry 1105 drivers/net/dsa/mt7530.c mt7530_hw_vlan_entry_init(&new_entry, port, untagged);
new_entry 1106 drivers/net/dsa/mt7530.c mt7530_hw_vlan_update(priv, vid, &new_entry,
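
The arch/x86/kvm/svm.c hits at 4622-4626 update an AVIC logical-ID table entry with a plain read-modify-write and publish the result with WRITE_ONCE(). A minimal user-space sketch of that idiom, assuming C11 atomics in place of the kernel's READ_ONCE()/WRITE_ONCE() and made-up mask values standing in for the AVIC_* constants:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the AVIC_LOGICAL_ID_ENTRY_* masks; not the real values. */
    #define GUEST_PHYSICAL_ID_MASK 0xffu
    #define VALID_MASK             (1u << 31)

    /* One slot of a table that other threads may read concurrently. */
    static _Atomic uint32_t table[256];

    static void set_logical_id_entry(unsigned int index, uint32_t g_physical_id)
    {
        /* Load the current word once (READ_ONCE() in the kernel code). */
        uint32_t new_entry = atomic_load_explicit(&table[index], memory_order_relaxed);

        new_entry &= ~GUEST_PHYSICAL_ID_MASK;
        new_entry |= g_physical_id & GUEST_PHYSICAL_ID_MASK;
        new_entry |= VALID_MASK;

        /* Publish the whole word in a single store (WRITE_ONCE() in the kernel code). */
        atomic_store_explicit(&table[index], new_entry, memory_order_relaxed);
    }

    int main(void)
    {
        set_logical_id_entry(3, 0x42);
        printf("entry[3] = 0x%08x\n", (unsigned int)atomic_load(&table[3]));
        return 0;
    }

The point of the single store is that a concurrent reader sees either the old word or the fully updated one, never a half-modified entry.
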
new_entry 378 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct hash_mac_addr *new_entry;
new_entry 389 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
new_entry 390 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (!new_entry)
new_entry 392 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ether_addr_copy(new_entry->addr, mac_addr);
new_entry 393 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c list_add_tail(&new_entry->list, &adap->mac_hlist);
new_entry 463 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct hash_mac_addr *entry, *new_entry;
new_entry 479 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
new_entry 480 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (!new_entry)
new_entry 482 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ether_addr_copy(new_entry->addr, addr);
new_entry 483 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c new_entry->iface_mac = true;
new_entry 484 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c list_add_tail(&new_entry->list, &adapter->mac_hlist);
new_entry 277 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c struct hash_mac_addr *new_entry, *entry;
new_entry 293 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
new_entry 294 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c if (!new_entry)
new_entry 296 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c ether_addr_copy(new_entry->addr, addr);
new_entry 297 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c new_entry->iface_mac = true;
new_entry 298 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c list_add_tail(&new_entry->list, &adapter->mac_hlist);
new_entry 947 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c struct hash_mac_addr *new_entry;
new_entry 958 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
new_entry 959 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c if (!new_entry)
new_entry 961 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c ether_addr_copy(new_entry->addr, mac_addr);
new_entry 962 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c list_add_tail(&new_entry->list, &adapter->mac_hlist);
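
The Chelsio hits just above (cxgb4_main.c 378-484, cxgb4vf_main.c 277-962) all repeat one pattern: allocate a zeroed hash_mac_addr node, copy the 6-byte MAC into it, and append it to the adapter's mac_hlist. A user-space sketch of the same allocate/copy/append flow, with calloc() and a hand-rolled singly linked list standing in for kzalloc(), ether_addr_copy() and list_add_tail():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct mac_node {
        uint8_t addr[6];
        struct mac_node *next;
    };

    struct mac_list {
        struct mac_node *head;
        struct mac_node **tail;   /* points at the last "next" slot */
    };

    /* Append a copy of mac; returns -1 on allocation failure (the kernel
     * code returns -ENOMEM from the same spot). */
    static int mac_list_add(struct mac_list *list, const uint8_t mac[6])
    {
        struct mac_node *new_entry = calloc(1, sizeof(*new_entry));

        if (!new_entry)
            return -1;
        memcpy(new_entry->addr, mac, 6);   /* ether_addr_copy() */
        *list->tail = new_entry;           /* list_add_tail()   */
        list->tail = &new_entry->next;
        return 0;
    }

    int main(void)
    {
        struct mac_list list;
        const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };

        list.head = NULL;
        list.tail = &list.head;
        if (mac_list_add(&list, mac) == 0)
            printf("first entry starts %02x:%02x\n", list.head->addr[0], list.head->addr[1]);
        free(list.head);
        return 0;
    }
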
new_entry 718 drivers/net/ethernet/marvell/octeontx2/af/mbox.h u16 new_entry[NPC_MCAM_MAX_SHIFTS];
new_entry 1813 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c u16 old_entry, new_entry;
new_entry 1827 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c new_entry = req->new_entry[index];
new_entry 1836 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
new_entry 1841 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
new_entry 1847 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
new_entry 1850 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
new_entry 1858 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c new_entry, cntr);
new_entry 1862 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
new_entry 155 drivers/net/ethernet/mellanox/mlx4/mcg.c struct mlx4_steer_index *new_entry;
new_entry 165 drivers/net/ethernet/mellanox/mlx4/mcg.c new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
new_entry 166 drivers/net/ethernet/mellanox/mlx4/mcg.c if (!new_entry)
new_entry 169 drivers/net/ethernet/mellanox/mlx4/mcg.c INIT_LIST_HEAD(&new_entry->duplicates);
new_entry 170 drivers/net/ethernet/mellanox/mlx4/mcg.c new_entry->index = index;
new_entry 171 drivers/net/ethernet/mellanox/mlx4/mcg.c list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
new_entry 184 drivers/net/ethernet/mellanox/mlx4/mcg.c list_add_tail(&dqp->list, &new_entry->duplicates);
new_entry 233 drivers/net/ethernet/mellanox/mlx4/mcg.c list_del(&new_entry->list);
new_entry 234 drivers/net/ethernet/mellanox/mlx4/mcg.c kfree(new_entry);
new_entry 1117 drivers/net/ethernet/mellanox/mlx4/mcg.c u8 new_entry = 0;
new_entry 1132 drivers/net/ethernet/mellanox/mlx4/mcg.c new_entry = 1;
new_entry 1146 drivers/net/ethernet/mellanox/mlx4/mcg.c new_entry = 1;
new_entry 1193 drivers/net/ethernet/mellanox/mlx4/mcg.c if (new_entry)
new_entry 1240 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c struct mlxsw_sp_acl_tcam_entry *new_entry;
new_entry 1249 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
new_entry 1250 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c if (IS_ERR(new_entry))
new_entry 1251 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c return PTR_ERR(new_entry);
new_entry 1253 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c ventry->entry = new_entry;
new_entry 2959 drivers/net/wireless/intersil/hostap/hostap_hw.c struct set_tim_data *new_entry;
new_entry 2966 drivers/net/wireless/intersil/hostap/hostap_hw.c new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
new_entry 2967 drivers/net/wireless/intersil/hostap/hostap_hw.c if (new_entry == NULL)
new_entry 2970 drivers/net/wireless/intersil/hostap/hostap_hw.c new_entry->aid = aid;
new_entry 2971 drivers/net/wireless/intersil/hostap/hostap_hw.c new_entry->set = set;
new_entry 2982 drivers/net/wireless/intersil/hostap/hostap_hw.c kfree(new_entry);
new_entry 2983 drivers/net/wireless/intersil/hostap/hostap_hw.c new_entry = NULL;
new_entry 2987 drivers/net/wireless/intersil/hostap/hostap_hw.c if (new_entry)
new_entry 2988 drivers/net/wireless/intersil/hostap/hostap_hw.c list_add_tail(&new_entry->list, &local->set_tim_list);
new_entry 65 drivers/s390/cio/qdio_debug.c struct qdio_dbf_entry *new_entry;
new_entry 99 drivers/s390/cio/qdio_debug.c new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
new_entry 100 drivers/s390/cio/qdio_debug.c if (!new_entry) {
new_entry 104 drivers/s390/cio/qdio_debug.c strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
new_entry 105 drivers/s390/cio/qdio_debug.c new_entry->dbf_info = irq_ptr->debug_area;
new_entry 107 drivers/s390/cio/qdio_debug.c list_add(&new_entry->dbf_list, &qdio_dbf_list);
new_entry 5538 drivers/s390/net/qeth_core_main.c struct qeth_dbf_entry *new_entry;
new_entry 5547 drivers/s390/net/qeth_core_main.c new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
new_entry 5548 drivers/s390/net/qeth_core_main.c if (!new_entry)
new_entry 5550 drivers/s390/net/qeth_core_main.c strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
new_entry 5551 drivers/s390/net/qeth_core_main.c new_entry->dbf_info = card->debug;
new_entry 5553 drivers/s390/net/qeth_core_main.c list_add(&new_entry->dbf_list, &qeth_dbf_list);
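
The rvu_npc.c hits at 1841-1862 (above) move an MCAM rule between slots: the destination must not already have a counter mapped, it is disabled, the rule is copied over from the old slot, the counter mapping follows, and only then is the destination enabled. A simplified model of that disable/copy/remap/enable order, with plain arrays standing in for the hardware MCAM and the entry2cntr map (types, names and sizes here are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define NUM_ENTRIES 64
    #define INVALID_MAP 0xffffu

    struct mcam_rule { unsigned char key[16]; };

    static struct mcam_rule rules[NUM_ENTRIES];
    static bool enabled[NUM_ENTRIES];
    static unsigned int entry2cntr[NUM_ENTRIES];

    /* Move the rule at old_idx into new_idx so it is never live half-copied. */
    static int shift_rule(unsigned int old_idx, unsigned int new_idx)
    {
        if (entry2cntr[new_idx] != INVALID_MAP)
            return -1;                       /* destination already has a counter */

        enabled[new_idx] = false;            /* npc_enable_mcam_entry(.., false) */
        rules[new_idx] = rules[old_idx];     /* npc_copy_mcam_entry() */

        if (entry2cntr[old_idx] != INVALID_MAP) {
            entry2cntr[new_idx] = entry2cntr[old_idx];   /* carry the counter over */
            entry2cntr[old_idx] = INVALID_MAP;
        }

        enabled[new_idx] = true;             /* npc_enable_mcam_entry(.., true) */
        return 0;
    }

    int main(void)
    {
        for (unsigned int i = 0; i < NUM_ENTRIES; i++)
            entry2cntr[i] = INVALID_MAP;
        memcpy(rules[3].key, "example-rule", 12);
        entry2cntr[3] = 7;
        enabled[3] = true;

        printf("shift 3 -> 10: %d, cntr[10] = %u\n", shift_rule(3, 10), entry2cntr[10]);
        return 0;
    }
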
new_entry 1346 drivers/scsi/hpsa.c int entry, struct hpsa_scsi_dev_t *new_entry)
new_entry 1352 drivers/scsi/hpsa.c h->dev[entry]->raid_level = new_entry->raid_level;
new_entry 1357 drivers/scsi/hpsa.c h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
new_entry 1360 drivers/scsi/hpsa.c if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
new_entry 1369 drivers/scsi/hpsa.c h->dev[entry]->raid_map = new_entry->raid_map;
new_entry 1370 drivers/scsi/hpsa.c h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
new_entry 1372 drivers/scsi/hpsa.c if (new_entry->offload_to_be_enabled) {
new_entry 1373 drivers/scsi/hpsa.c h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
new_entry 1376 drivers/scsi/hpsa.c h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
new_entry 1377 drivers/scsi/hpsa.c h->dev[entry]->offload_config = new_entry->offload_config;
new_entry 1378 drivers/scsi/hpsa.c h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
new_entry 1379 drivers/scsi/hpsa.c h->dev[entry]->queue_depth = new_entry->queue_depth;
new_entry 1386 drivers/scsi/hpsa.c h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
new_entry 1391 drivers/scsi/hpsa.c if (!new_entry->offload_to_be_enabled)
new_entry 1399 drivers/scsi/hpsa.c int entry, struct hpsa_scsi_dev_t *new_entry,
new_entry 1412 drivers/scsi/hpsa.c if (new_entry->target == -1) {
new_entry 1413 drivers/scsi/hpsa.c new_entry->target = h->dev[entry]->target;
new_entry 1414 drivers/scsi/hpsa.c new_entry->lun = h->dev[entry]->lun;
new_entry 1417 drivers/scsi/hpsa.c h->dev[entry] = new_entry;
new_entry 1418 drivers/scsi/hpsa.c added[*nadded] = new_entry;
new_entry 1421 drivers/scsi/hpsa.c hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
new_entry 1710 drivers/scsi/nsp32.c int new_entry;
new_entry 1726 drivers/scsi/nsp32.c for (new_entry = old_entry; new_entry < sg_num; new_entry++) {
new_entry 1727 drivers/scsi/nsp32.c sentlen += (le32_to_cpu(sgt[new_entry].len) & ~SGTEND);
new_entry 1734 drivers/scsi/nsp32.c if (new_entry == sg_num) {
new_entry 1748 drivers/scsi/nsp32.c len = le32_to_cpu(sgt[new_entry].len);
new_entry 1749 drivers/scsi/nsp32.c addr = le32_to_cpu(sgt[new_entry].addr);
new_entry 1751 drivers/scsi/nsp32.c sgt[new_entry].addr = cpu_to_le32(addr);
new_entry 1752 drivers/scsi/nsp32.c sgt[new_entry].len = cpu_to_le32(restlen);
new_entry 1755 drivers/scsi/nsp32.c data->cur_entry = new_entry;
new_entry 1520 drivers/scsi/pmcraid.c u32 new_entry = 1;
new_entry 1581 drivers/scsi/pmcraid.c new_entry = 0;
new_entry 1586 drivers/scsi/pmcraid.c if (new_entry) {
new_entry 1231 drivers/staging/exfat/exfat_super.c s32 new_entry = 0;
new_entry 1281 drivers/staging/exfat/exfat_super.c new_entry = new_fid->entry;
new_entry 1282 drivers/staging/exfat/exfat_super.c ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
new_entry 1319 drivers/staging/exfat/exfat_super.c ep = get_entry_in_dir(sb, p_dir, new_entry, NULL);
new_entry 1324 drivers/staging/exfat/exfat_super.c new_entry, ep);
new_entry 1327 drivers/staging/exfat/exfat_super.c p_fs->fs_func->delete_dir_entry(sb, p_dir, new_entry, 0,
new_entry 370 fs/cifs/readdir.c char *new_entry;
new_entry 377 fs/cifs/readdir.c new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
new_entry 386 fs/cifs/readdir.c new_entry = old_entry + next_offset;
new_entry 388 fs/cifs/readdir.c cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry);
new_entry 390 fs/cifs/readdir.c if (new_entry >= end_of_smb) {
new_entry 392 fs/cifs/readdir.c new_entry, end_of_smb, old_entry);
new_entry 395 fs/cifs/readdir.c (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb))
new_entry 397 fs/cifs/readdir.c (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) {
new_entry 399 fs/cifs/readdir.c new_entry, end_of_smb);
new_entry 402 fs/cifs/readdir.c return new_entry;
new_entry 720 fs/dax.c void *new_entry = dax_make_entry(pfn, flags);
new_entry 741 fs/dax.c dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
new_entry 750 fs/dax.c old = dax_lock_entry(xas, new_entry);
new_entry 753 fs/dax.c entry = new_entry;
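
The fs/cifs/readdir.c hits at 377-402 above step a pointer through an SMB response buffer one variable-length record at a time and bail out if the next record, or even its fixed-size header, would run past end_of_smb. A self-contained sketch of that bounds-checked walk over a byte buffer (the record layout below is invented for the example, not the FIND_FILE wire format):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative record header: not the SMB directory-entry layout. */
    struct rec_hdr {
        uint32_t next_offset;   /* 0 means "last record" */
        uint32_t payload_len;
    };

    /* Return the next record, or NULL if it would fall outside the buffer. */
    static const char *next_record(const char *old_entry, const char *end_of_buf)
    {
        struct rec_hdr hdr;
        const char *new_entry;

        memcpy(&hdr, old_entry, sizeof(hdr));
        if (hdr.next_offset == 0)
            return NULL;

        new_entry = old_entry + hdr.next_offset;
        if (new_entry >= end_of_buf)            /* record starts past the end */
            return NULL;
        if (new_entry + sizeof(struct rec_hdr) > end_of_buf)
            return NULL;                        /* header itself would overrun */
        return new_entry;
    }

    int main(void)
    {
        char buf[64] = { 0 };
        struct rec_hdr first = { .next_offset = 32, .payload_len = 24 };
        const char *next;

        memcpy(buf, &first, sizeof(first));
        next = next_record(buf, buf + sizeof(buf));
        if (next)
            printf("next record at offset %td\n", next - buf);
        return 0;
    }
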
new_entry 71 fs/ext4/block_validity.c struct ext4_system_zone *new_entry = NULL, *entry;
new_entry 88 fs/ext4/block_validity.c new_entry = rb_entry(new_node, struct ext4_system_zone,
new_entry 94 fs/ext4/block_validity.c if (!new_entry) {
new_entry 95 fs/ext4/block_validity.c new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
new_entry 97 fs/ext4/block_validity.c if (!new_entry)
new_entry 99 fs/ext4/block_validity.c new_entry->start_blk = start_blk;
new_entry 100 fs/ext4/block_validity.c new_entry->count = count;
new_entry 101 fs/ext4/block_validity.c new_node = &new_entry->node;
new_entry 111 fs/ext4/block_validity.c if (can_merge(entry, new_entry)) {
new_entry 112 fs/ext4/block_validity.c new_entry->start_blk = entry->start_blk;
new_entry 113 fs/ext4/block_validity.c new_entry->count += entry->count;
new_entry 123 fs/ext4/block_validity.c if (can_merge(new_entry, entry)) {
new_entry 124 fs/ext4/block_validity.c new_entry->count += entry->count;
new_entry 4623 fs/ext4/mballoc.c struct ext4_free_data *new_entry,
new_entry 4626 fs/ext4/mballoc.c if ((entry->efd_tid != new_entry->efd_tid) ||
new_entry 4627 fs/ext4/mballoc.c (entry->efd_group != new_entry->efd_group))
new_entry 4630 fs/ext4/mballoc.c new_entry->efd_start_cluster) {
new_entry 4631 fs/ext4/mballoc.c new_entry->efd_start_cluster = entry->efd_start_cluster;
new_entry 4632 fs/ext4/mballoc.c new_entry->efd_count += entry->efd_count;
new_entry 4633 fs/ext4/mballoc.c } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
new_entry 4635 fs/ext4/mballoc.c new_entry->efd_count += entry->efd_count;
new_entry 4647 fs/ext4/mballoc.c struct ext4_free_data *new_entry)
new_entry 4651 fs/ext4/mballoc.c ext4_grpblk_t clusters = new_entry->efd_count;
new_entry 4663 fs/ext4/mballoc.c new_node = &new_entry->efd_node;
new_entry 4664 fs/ext4/mballoc.c cluster = new_entry->efd_start_cluster;
new_entry 4698 fs/ext4/mballoc.c ext4_try_merge_freed_extent(sbi, entry, new_entry,
new_entry 4705 fs/ext4/mballoc.c ext4_try_merge_freed_extent(sbi, entry, new_entry,
new_entry 4710 fs/ext4/mballoc.c list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
new_entry 4893 fs/ext4/mballoc.c struct ext4_free_data *new_entry;
new_entry 4898 fs/ext4/mballoc.c new_entry = kmem_cache_alloc(ext4_free_data_cachep,
new_entry 4900 fs/ext4/mballoc.c new_entry->efd_start_cluster = bit;
new_entry 4901 fs/ext4/mballoc.c new_entry->efd_group = block_group;
new_entry 4902 fs/ext4/mballoc.c new_entry->efd_count = count_clusters;
new_entry 4903 fs/ext4/mballoc.c new_entry->efd_tid = handle->h_transaction->t_tid;
new_entry 4907 fs/ext4/mballoc.c ext4_mb_free_metadata(handle, &e4b, new_entry);
new_entry 857 fs/f2fs/namei.c struct f2fs_dir_entry *new_entry;
new_entry 915 fs/f2fs/namei.c new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
new_entry 917 fs/f2fs/namei.c if (!new_entry) {
new_entry 931 fs/f2fs/namei.c f2fs_set_link(new_dir, new_entry, new_page, old_inode);
new_entry 1050 fs/f2fs/namei.c struct f2fs_dir_entry *old_entry, *new_entry;
new_entry 1083 fs/f2fs/namei.c new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
new_entry 1084 fs/f2fs/namei.c if (!new_entry) {
new_entry 1160 fs/f2fs/namei.c f2fs_set_link(new_dir, new_entry, new_page, old_inode);
new_entry 1005 security/tomoyo/common.h int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
new_entry 1013 security/tomoyo/common.h int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
new_entry 31 security/tomoyo/domain.c int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
new_entry 48 security/tomoyo/domain.c if (!check_duplicate(entry, new_entry))
new_entry 55 security/tomoyo/domain.c entry = tomoyo_commit_ok(new_entry, size);
new_entry 92 security/tomoyo/domain.c int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
new_entry 108 security/tomoyo/domain.c new_entry->cond = tomoyo_get_condition(param);
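
Both ext4 call sites above apply the same merge rule: fs/ext4/block_validity.c 111-124 and fs/ext4/mballoc.c 4630-4635 grow an existing node when the new range is adjacent to it on either side, instead of inserting a second node. A small sketch of that rule on plain { start, count } ranges, covering the two adjacency cases the branches handle:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct range {
        uint64_t start;
        uint64_t count;
    };

    /* If new_entry touches *entry, fold *entry into new_entry and report true,
     * mirroring the two merge branches in the ext4 code. */
    static bool try_merge(struct range *new_entry, const struct range *entry)
    {
        if (entry->start + entry->count == new_entry->start) {
            /* existing range ends exactly where the new one starts */
            new_entry->start = entry->start;
            new_entry->count += entry->count;
            return true;
        }
        if (new_entry->start + new_entry->count == entry->start) {
            /* new range ends exactly where the existing one starts */
            new_entry->count += entry->count;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct range new_entry = { .start = 100, .count = 8 };
        struct range left = { .start = 90, .count = 10 };

        if (try_merge(&new_entry, &left))
            printf("merged: start=%llu count=%llu\n",
                   (unsigned long long)new_entry.start,
                   (unsigned long long)new_entry.count);
        return 0;
    }
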
new_entry 109 security/tomoyo/domain.c if (!new_entry->cond)
new_entry 115 security/tomoyo/domain.c if (new_entry->cond->transit &&
new_entry 116 security/tomoyo/domain.c !(new_entry->type == TOMOYO_TYPE_PATH_ACL &&
new_entry 117 security/tomoyo/domain.c container_of(new_entry, struct tomoyo_path_acl, head)
new_entry 127 security/tomoyo/domain.c if (!tomoyo_same_acl_head(entry, new_entry) ||
new_entry 128 security/tomoyo/domain.c !check_duplicate(entry, new_entry))
new_entry 131 security/tomoyo/domain.c entry->is_deleted = merge_duplicate(entry, new_entry,
new_entry 139 security/tomoyo/domain.c entry = tomoyo_commit_ok(new_entry, size);
new_entry 147 security/tomoyo/domain.c tomoyo_put_condition(new_entry->cond);
new_entry 13 tools/perf/util/rblist.c int rblist__add_node(struct rblist *rblist, const void *new_entry)
new_entry 24 tools/perf/util/rblist.c rc = rblist->node_cmp(parent, new_entry);
new_entry 35 tools/perf/util/rblist.c new_node = rblist->node_new(rblist, new_entry);
new_entry 27 tools/perf/util/rblist.h struct rb_node *(*node_new)(struct rblist *rlist, const void *new_entry);
new_entry 34 tools/perf/util/rblist.h int rblist__add_node(struct rblist *rblist, const void *new_entry);
new_entry 63 tools/perf/util/strlist.c int strlist__add(struct strlist *slist, const char *new_entry)
new_entry 65 tools/perf/util/strlist.c return rblist__add_node(&slist->rblist, new_entry);
new_entry 2115 tools/perf/util/symbol.c static int vmlinux_path__add(const char *new_entry)
new_entry 2117 tools/perf/util/symbol.c vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
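
The tools/perf hits just above (rblist.c 13-35, reached through strlist__add()) walk an rb-tree from the root with the node_cmp callback and, when no node matches, create one with node_new and link it where the walk stopped. A condensed sketch of that insert walk on an unbalanced binary search tree; the real rblist uses the kernel rbtree and rebalances after linking, which is omitted here:

    #define _POSIX_C_SOURCE 200809L   /* for strdup() */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct node {
        struct node *left, *right;
        char *str;
    };

    /* Descend from *rootp comparing against new_entry; insert a new node if no
     * existing node matches.  Returns 0 on insert, -1 if already present. */
    static int tree_add(struct node **rootp, const char *new_entry)
    {
        struct node **p = rootp;

        while (*p) {
            int rc = strcmp((*p)->str, new_entry);   /* node_cmp() */

            if (rc > 0)
                p = &(*p)->left;
            else if (rc < 0)
                p = &(*p)->right;
            else
                return -1;                           /* duplicate */
        }

        *p = calloc(1, sizeof(**p));                 /* node_new() */
        if (!*p)
            return -1;
        (*p)->str = strdup(new_entry);
        return 0;
    }

    int main(void)
    {
        struct node *root = NULL;

        tree_add(&root, "vmlinux");
        tree_add(&root, "kallsyms");
        printf("root holds \"%s\"\n", root->str);
        return 0;
    }
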