/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/core/ |
D | event.c | 31 if (--event->refs[index * event->types_nr + type] == 0) { in nvkm_event_put() 44 if (++event->refs[index * event->types_nr + type] == 1) { in nvkm_event_get() 58 if (!event->refs || WARN_ON(index >= event->index_nr)) in nvkm_event_send() 77 if (event->refs) { in nvkm_event_fini() 78 kfree(event->refs); in nvkm_event_fini() 79 event->refs = NULL; in nvkm_event_fini() 87 event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr, in nvkm_event_init() 89 if (!event->refs) in nvkm_event_init()
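The nvkm_event hits above outline a flat counter table addressed as refs[index * types_nr + type]: nvkm_event_get() arms the source when a counter goes 0 -> 1, nvkm_event_put() disarms it on 1 -> 0, and init/fini allocate and free the array. Below is a minimal userspace sketch of that shape under illustrative names (ev_table, ev_get, ev_put), not the nvkm API itself:

/* Flat per-index/per-type refcount table, sketching the shape of
 * refs[index * types_nr + type] above. Hypothetical names throughout. */
#include <stdio.h>
#include <stdlib.h>

struct ev_table {
    int *refs;              /* index_nr * types_nr counters, zeroed */
    int index_nr, types_nr;
};

static int ev_init(struct ev_table *ev, int index_nr, int types_nr)
{
    ev->refs = calloc((size_t)index_nr * types_nr, sizeof(*ev->refs));
    if (!ev->refs)
        return -1;
    ev->index_nr = index_nr;
    ev->types_nr = types_nr;
    return 0;
}

static void ev_get(struct ev_table *ev, int index, int type)
{
    if (++ev->refs[index * ev->types_nr + type] == 1)
        printf("(%d,%d): first user, arm the source\n", index, type);
}

static void ev_put(struct ev_table *ev, int index, int type)
{
    if (--ev->refs[index * ev->types_nr + type] == 0)
        printf("(%d,%d): last user, disarm the source\n", index, type);
}

static void ev_fini(struct ev_table *ev)
{
    free(ev->refs);
    ev->refs = NULL;
}

int main(void)
{
    struct ev_table ev;

    if (ev_init(&ev, 4, 2))
        return 1;
    ev_get(&ev, 1, 0);
    ev_put(&ev, 1, 0);
    ev_fini(&ev);
    return 0;
}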
|
D | notify.c | 139 if ((notify->event = event), event->refs) { in nvkm_notify_init()
|
/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/ |
D | libcfs_lock.c | 150 cfs_percpt_atomic_free(atomic_t **refs) in cfs_percpt_atomic_free() argument 152 cfs_percpt_free(refs); in cfs_percpt_atomic_free() 160 atomic_t **refs; in cfs_percpt_atomic_alloc() local 164 refs = cfs_percpt_alloc(cptab, sizeof(*ref)); in cfs_percpt_atomic_alloc() 165 if (!refs) in cfs_percpt_atomic_alloc() 168 cfs_percpt_for_each(ref, i, refs) in cfs_percpt_atomic_alloc() 170 return refs; in cfs_percpt_atomic_alloc() 176 cfs_percpt_atomic_summary(atomic_t **refs) in cfs_percpt_atomic_summary() argument 182 cfs_percpt_for_each(ref, i, refs) in cfs_percpt_atomic_summary()
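cfs_percpt_atomic_alloc() and cfs_percpt_atomic_summary() above allocate one atomic counter per CPU partition and later sum them into a single total. A rough userspace equivalent using plain C11 atomics; the names are illustrative and a flat array stands in for the per-partition allocator:

/* One counter per partition, summed on demand. Illustrative only. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int *percpt_alloc(int nparts, int init)
{
    atomic_int *refs = calloc(nparts, sizeof(*refs));
    int i;

    if (!refs)
        return NULL;
    for (i = 0; i < nparts; i++)
        atomic_init(&refs[i], init);
    return refs;
}

static int percpt_summary(const atomic_int *refs, int nparts)
{
    int i, sum = 0;

    for (i = 0; i < nparts; i++)
        sum += atomic_load(&refs[i]);
    return sum;
}

int main(void)
{
    atomic_int *refs = percpt_alloc(4, 0);

    if (!refs)
        return 1;
    atomic_fetch_add(&refs[2], 3);  /* e.g. partition 2 took three refs */
    printf("total refs: %d\n", percpt_summary(refs, 4));
    free(refs);
    return 0;
}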
|
/linux-4.4.14/arch/s390/mm/ |
D | gup.c | 59 int refs; in gup_huge_pmd() local 67 refs = 0; in gup_huge_pmd() 76 refs++; in gup_huge_pmd() 79 if (!page_cache_add_speculative(head, refs)) { in gup_huge_pmd() 80 *nr -= refs; in gup_huge_pmd() 85 *nr -= refs; in gup_huge_pmd() 86 while (refs--) in gup_huge_pmd() 95 while (refs--) { in gup_huge_pmd()
|
/linux-4.4.14/arch/sparc/mm/ |
D | gup.c | 74 int refs; in gup_huge_pmd() local 82 refs = 0; in gup_huge_pmd() 91 refs++; in gup_huge_pmd() 94 if (!page_cache_add_speculative(head, refs)) { in gup_huge_pmd() 95 *nr -= refs; in gup_huge_pmd() 100 *nr -= refs; in gup_huge_pmd() 101 while (refs--) in gup_huge_pmd() 109 while (refs--) { in gup_huge_pmd()
|
/linux-4.4.14/mm/ |
D | gup.c | 1123 int refs; in gup_huge_pmd() local 1128 refs = 0; in gup_huge_pmd() 1137 refs++; in gup_huge_pmd() 1140 if (!page_cache_add_speculative(head, refs)) { in gup_huge_pmd() 1141 *nr -= refs; in gup_huge_pmd() 1146 *nr -= refs; in gup_huge_pmd() 1147 while (refs--) in gup_huge_pmd() 1157 while (refs--) { in gup_huge_pmd() 1170 int refs; in gup_huge_pud() local 1175 refs = 0; in gup_huge_pud() [all …]
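The gup_huge_pmd() hits here, and the s390/sparc/mips/x86/powerpc variants elsewhere in this list, share one shape: record the tail pages of a huge page while counting them in refs, try to take all of those references on the head page in one go with page_cache_add_speculative(), and roll *nr back (and drop the refs again) if that or a later recheck fails. A userspace sketch of just the speculative-take-and-roll-back step, with try_add_refs() standing in for page_cache_add_speculative(); none of this is the mm API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add 'refs' to *count only if the object is still live (count != 0). */
static bool try_add_refs(atomic_int *count, int refs)
{
    int old = atomic_load(count);

    do {
        if (old == 0)
            return false;       /* already being freed, back off */
    } while (!atomic_compare_exchange_weak(count, &old, old + refs));
    return true;
}

int main(void)
{
    atomic_int head_count;
    int refs = 0, nr = 0, i;

    atomic_init(&head_count, 1);    /* head page starts with one ref */

    for (i = 0; i < 512; i++) {     /* pretend 512 tail pages were recorded */
        nr++;
        refs++;
    }

    if (!try_add_refs(&head_count, refs)) {
        nr -= refs;                 /* roll back: nothing was pinned */
        return 1;
    }

    /* If a later recheck failed, the real code would undo the work:
     *   nr -= refs; while (refs--) put_page(head);
     */
    printf("pinned %d pages, head count now %d\n", nr, atomic_load(&head_count));
    return 0;
}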
|
D | hugetlb.c | 709 kref_init(&resv_map->refs); in resv_map_alloc() 724 struct resv_map *resv_map = container_of(ref, struct resv_map, refs); in resv_map_release() 2974 kref_get(&resv->refs); in hugetlb_vm_op_open() 2993 kref_put(&resv->refs, resv_map_release); in hugetlb_vm_op_close() 4101 kref_put(&resv_map->refs, resv_map_release); in hugetlb_reserve_pages()
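struct resv_map embeds a struct kref named refs: kref_init() at allocation, kref_get() when another VMA shares the map, and kref_put(..., resv_map_release) where the release callback recovers the container via container_of(). A hand-rolled userspace imitation of that embedded-kref pattern; this is not <linux/kref.h>, and the memory ordering is deliberately simplified:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct kref { atomic_int refcount; };

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k)  { atomic_fetch_add(&k->refcount, 1); }

static int kref_put(struct kref *k, void (*release)(struct kref *))
{
    if (atomic_fetch_sub(&k->refcount, 1) == 1) {   /* dropped the last ref */
        release(k);
        return 1;
    }
    return 0;
}

struct resv_map_like {
    long reserved_pages;
    struct kref refs;       /* embedded, as in struct resv_map */
};

static void resv_release(struct kref *ref)
{
    struct resv_map_like *map = container_of(ref, struct resv_map_like, refs);

    printf("last user gone, freeing map (%ld pages)\n", map->reserved_pages);
    free(map);
}

int main(void)
{
    struct resv_map_like *map = calloc(1, sizeof(*map));

    if (!map)
        return 1;
    kref_init(&map->refs);
    kref_get(&map->refs);               /* a second VMA shares the map */
    kref_put(&map->refs, resv_release); /* first close: still alive */
    kref_put(&map->refs, resv_release); /* second close: released */
    return 0;
}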
|
/linux-4.4.14/arch/mips/mm/ |
D | gup.c | 76 int refs; in gup_huge_pmd() local 84 refs = 0; in gup_huge_pmd() 94 refs++; in gup_huge_pmd() 97 get_head_page_multiple(head, refs); in gup_huge_pmd() 142 int refs; in gup_huge_pud() local 150 refs = 0; in gup_huge_pud() 160 refs++; in gup_huge_pud() 163 get_head_page_multiple(head, refs); in gup_huge_pud()
|
/linux-4.4.14/arch/x86/mm/ |
D | gup.c | 122 int refs; in gup_huge_pmd() local 133 refs = 0; in gup_huge_pmd() 143 refs++; in gup_huge_pmd() 145 get_head_page_multiple(head, refs); in gup_huge_pmd() 198 int refs; in gup_huge_pud() local 209 refs = 0; in gup_huge_pud() 219 refs++; in gup_huge_pud() 221 get_head_page_multiple(head, refs); in gup_huge_pud()
|
/linux-4.4.14/fs/btrfs/ |
D | delayed-inode.c | 56 atomic_set(&delayed_node->refs, 0); in btrfs_init_delayed_node() 96 atomic_inc(&node->refs); in btrfs_get_delayed_node() 104 atomic_inc(&node->refs); /* can be accessed */ in btrfs_get_delayed_node() 111 atomic_add(2, &node->refs); in btrfs_get_delayed_node() 141 atomic_add(2, &node->refs); in btrfs_get_or_create_delayed_node() 182 atomic_inc(&node->refs); /* inserted into list */ in btrfs_queue_delayed_node() 196 atomic_dec(&node->refs); /* not in the list */ in btrfs_dequeue_delayed_node() 217 atomic_inc(&node->refs); in btrfs_first_delayed_node() 244 atomic_inc(&next->refs); in btrfs_next_delayed_node() 269 if (atomic_dec_and_test(&delayed_node->refs)) { in __btrfs_release_delayed_node() [all …]
|
D | delayed-ref.h | 56 atomic_t refs; member 221 WARN_ON(atomic_read(&ref->refs) == 0); in btrfs_put_delayed_ref() 222 if (atomic_dec_and_test(&ref->refs)) { in btrfs_put_delayed_ref()
|
D | extent_map.c | 58 atomic_set(&em->refs, 1); in alloc_extent_map() 74 WARN_ON(atomic_read(&em->refs) == 0); in free_extent_map() 75 if (atomic_dec_and_test(&em->refs)) { in free_extent_map() 325 atomic_inc(&em->refs); in setup_extent_mapping() 384 atomic_inc(&em->refs); in __lookup_extent_mapping()
|
D | delayed-inode.h | 70 atomic_t refs; member 83 atomic_t refs; member
|
D | ordered-data.c | 213 atomic_set(&entry->refs, 1); in __btrfs_add_ordered_extent() 359 atomic_inc(&entry->refs); in btrfs_dec_test_first_ordered_pending() 426 atomic_inc(&entry->refs); in btrfs_dec_test_ordered_pending() 457 atomic_inc(&ordered->refs); in btrfs_get_logged_extents() 566 if (atomic_dec_and_test(&entry->refs)) { in btrfs_put_ordered_extent() 680 atomic_inc(&ordered->refs); in btrfs_wait_ordered_extents() 854 atomic_inc(&entry->refs); in btrfs_lookup_ordered_extent() 896 atomic_inc(&entry->refs); in btrfs_lookup_ordered_range() 933 atomic_inc(&entry->refs); in btrfs_lookup_first_ordered_extent()
|
D | disk-io.h | 105 if (atomic_inc_not_zero(&root->refs)) in btrfs_grab_fs_root() 112 if (atomic_dec_and_test(&root->refs)) in btrfs_put_fs_root()
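btrfs_grab_fs_root() only takes a reference while the count is still non-zero (atomic_inc_not_zero), so a root that has already begun dying cannot be revived, and btrfs_put_fs_root() frees on the final drop. A lock-based stand-in for that grab/put pair, with illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct root_like {
    pthread_mutex_t lock;
    int refs;
};

static bool root_grab(struct root_like *root)
{
    bool alive;

    pthread_mutex_lock(&root->lock);
    alive = root->refs > 0;         /* refuse to resurrect a dying object */
    if (alive)
        root->refs++;
    pthread_mutex_unlock(&root->lock);
    return alive;
}

static void root_put(struct root_like *root)
{
    bool last;

    pthread_mutex_lock(&root->lock);
    last = --root->refs == 0;
    pthread_mutex_unlock(&root->lock);
    if (last)
        free(root);                 /* final reference dropped */
}

int main(void)
{
    struct root_like *root = calloc(1, sizeof(*root));

    if (!root)
        return 1;
    pthread_mutex_init(&root->lock, NULL);
    root->refs = 1;

    if (root_grab(root))
        root_put(root);             /* drop the extra reference */
    root_put(root);                 /* last put frees the object */
    return 0;
}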
|
D | extent_io.c | 70 atomic_read(&state->refs)); in btrfs_leak_debug_check() 79 eb->start, eb->len, atomic_read(&eb->refs)); in btrfs_leak_debug_check() 238 atomic_set(&state->refs, 1); in alloc_extent_state() 248 if (atomic_dec_and_test(&state->refs)) { in free_extent_state() 642 atomic_dec(&cached->refs); in __clear_extent_bit() 794 atomic_inc(&state->refs); in wait_extent_bit() 835 atomic_inc(&state->refs); in cache_state_if_flags() 1629 atomic_inc(&state->refs); in find_delalloc_range() 2951 atomic_inc(&em->refs); in __get_extent_map() 2962 atomic_inc(&em->refs); in __get_extent_map() [all …]
|
D | extent-tree.c | 807 u64 offset, int metadata, u64 *refs, u64 *flags) in btrfs_lookup_extent_info() argument 899 atomic_inc(&head->node.refs); in btrfs_lookup_extent_info() 926 if (refs) in btrfs_lookup_extent_info() 927 *refs = num_refs; in btrfs_lookup_extent_info() 1055 u64 refs; in convert_extent_item_v0() local 1064 refs = btrfs_extent_refs_v0(leaf, ei0); in convert_extent_item_v0() 1104 btrfs_set_extent_refs(leaf, item, refs); in convert_extent_item_v0() 1736 u64 refs; in setup_inline_extent_backref() local 1750 refs = btrfs_extent_refs(leaf, ei); in setup_inline_extent_backref() 1751 refs += refs_to_add; in setup_inline_extent_backref() [all …]
|
D | extent_map.h | 36 atomic_t refs; member
|
D | extent_io.h | 112 atomic_t refs; member 131 atomic_t refs; member 308 atomic_inc(&eb->refs); in extent_buffer_get()
|
D | scrub.c | 67 atomic_t refs; member 82 atomic_t refs; member 115 atomic_t refs; /* free mem on transition to zero */ member 145 atomic_t refs; member 205 atomic_t refs; member 310 atomic_inc(&sctx->refs); in scrub_pending_bio_inc() 361 atomic_inc(&sctx->refs); in scrub_pending_trans_workers_inc() 452 if (atomic_dec_and_test(&sctx->refs)) in scrub_put_ctx() 467 atomic_set(&sctx->refs, 1); in scrub_setup_ctx() 861 atomic_inc(&recover->refs); in scrub_get_recover() [all …]
|
D | raid56.c | 152 atomic_t refs; member 392 atomic_dec(&rbio->refs); in __remove_rbio_from_cache() 483 atomic_inc(&rbio->refs); in cache_rbio() 683 atomic_dec(&cur->refs); in lock_stripe_add() 732 atomic_inc(&rbio->refs); in lock_stripe_add() 778 atomic_dec(&rbio->refs); in unlock_stripe() 795 atomic_inc(&next->refs); in unlock_stripe() 837 WARN_ON(atomic_read(&rbio->refs) < 0); in __free_raid_bio() 838 if (!atomic_dec_and_test(&rbio->refs)) in __free_raid_bio() 989 atomic_set(&rbio->refs, 1); in alloc_rbio()
|
D | delayed-ref.c | 167 atomic_inc(&head->node.refs); in btrfs_delayed_ref_lock() 579 atomic_set(&ref->refs, 1); in add_delayed_ref_head() 667 atomic_set(&ref->refs, 1); in add_delayed_tree_ref() 722 atomic_set(&ref->refs, 1); in add_delayed_data_ref()
|
D | backref.c | 930 u64 time_seq, struct ulist *refs, argument 995 atomic_inc(&head->node.refs); 1100 ret = ulist_add_merge_ptr(refs, ref->parent, 1676 struct ulist *refs = NULL; local 1697 tree_mod_seq_elem.seq, &refs, 1703 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) { 1722 free_leaf_list(refs);
|
D | ordered-data.h | 114 atomic_t refs; member
|
D | ctree.h | 621 __le64 refs; member 627 __le32 refs; member 774 __le32 refs; member 1944 atomic_t refs; member 2570 BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); 2575 BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); 2926 BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32); 2935 BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32); 3406 u64 offset, int metadata, u64 *refs, u64 *flags);
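Several of the refs members above (btrfs_extent_item, btrfs_root_item) are little-endian on-disk fields whose getters and setters are generated by the BTRFS_SETGET_FUNCS macros rather than written by hand. A reduced userspace version of that accessor-generating idiom; the struct, macro and field names are made up, and <endian.h> is the glibc/Linux interface:

#include <stdint.h>
#include <endian.h>     /* le32toh / htole32 (glibc) */

struct disk_item {
    uint32_t refs;      /* stored little-endian on disk (__le32 in the kernel) */
};

#define SETGET_FUNCS(name, type, member, bits)              \
static inline uint##bits##_t get_##name(const type *s)      \
{                                                           \
    return le##bits##toh(s->member);                        \
}                                                           \
static inline void set_##name(type *s, uint##bits##_t val)  \
{                                                           \
    s->member = htole##bits(val);                           \
}

SETGET_FUNCS(item_refs, struct disk_item, refs, 32)

int main(void)
{
    struct disk_item item;

    set_item_refs(&item, 2);
    return get_item_refs(&item) == 2 ? 0 : 1;
}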
|
D | volumes.h | 302 atomic_t refs; member
|
D | ctree.c | 162 if (atomic_inc_not_zero(&eb->refs)) { in btrfs_root_node() 983 u64 refs; in update_ref_for_cow() local 1009 &refs, &flags); in update_ref_for_cow() 1012 if (refs == 0) { in update_ref_for_cow() 1018 refs = 1; in update_ref_for_cow() 1030 if (refs > 1) { in update_ref_for_cow()
|
D | disk-io.c | 1063 BUG_ON(!atomic_read(&eb->refs)); in btree_set_page_dirty() 1268 atomic_set(&root->refs, 1); in __setup_root() 4202 atomic_inc(&head->node.refs); in btrfs_destroy_delayed_refs()
|
D | volumes.c | 4735 atomic_inc(&em->refs); in __btrfs_alloc_chunk() 5189 atomic_set(&bbio->refs, 1); in alloc_btrfs_bio() 5196 WARN_ON(!atomic_read(&bbio->refs)); in btrfs_get_bbio() 5197 atomic_inc(&bbio->refs); in btrfs_get_bbio() 5204 if (atomic_dec_and_test(&bbio->refs)) in btrfs_put_bbio()
|
D | send.c | 3860 struct fs_path *name, void *ctx, struct list_head *refs) in record_ref() argument 3883 ret = __record_ref(refs, dir, gen, p); in record_ref()
|
D | tree-log.c | 4162 atomic_inc(&em->refs); in btrfs_log_changed_extents()
|
/linux-4.4.14/drivers/staging/lustre/lnet/lnet/ |
D | lib-eq.c | 156 int **refs = NULL; in LNetEQFree() local 190 refs = eq->eq_refs; in LNetEQFree() 201 if (refs != NULL) in LNetEQFree() 202 cfs_percpt_free(refs); in LNetEQFree()
|
/linux-4.4.14/net/netfilter/ |
D | nf_conntrack_ecache.c | 42 struct nf_conn *refs[16]; in ecache_work_evict_list() local 63 refs[evicted] = ct; in ecache_work_evict_list() 65 if (++evicted >= ARRAY_SIZE(refs)) { in ecache_work_evict_list() 75 nf_ct_put(refs[--evicted]); in ecache_work_evict_list()
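ecache_work_evict_list() batches up to ARRAY_SIZE(refs) = 16 conntrack pointers while walking under a lock, breaks out when the on-stack array is full, and only calls nf_ct_put() on them once the traversal is done. A generic sketch of that bounded collect-now-release-later pattern, with illustrative types rather than the conntrack API:

#include <pthread.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct obj { int refcount; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj objects[40];

static void obj_put(struct obj *o)
{
    o->refcount--;      /* real code would free at zero */
}

static void evict_batch(void)
{
    struct obj *refs[16];
    unsigned int evicted = 0, i;

    pthread_mutex_lock(&list_lock);
    for (i = 0; i < ARRAY_SIZE(objects); i++) {
        objects[i].refcount++;      /* hold the object past the unlock */
        refs[evicted] = &objects[i];
        if (++evicted >= ARRAY_SIZE(refs))
            break;                  /* batch full, come back later */
    }
    pthread_mutex_unlock(&list_lock);

    /* Only after dropping the lock is it safe to do the heavier put. */
    while (evicted)
        obj_put(refs[--evicted]);
}

int main(void)
{
    evict_batch();
    printf("object 0 refcount back to %d\n", objects[0].refcount);
    return 0;
}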
|
/linux-4.4.14/fs/btrfs/tests/ |
D | qgroup-tests.c | 94 u64 refs; in add_tree_ref() local 119 refs = btrfs_extent_refs(path->nodes[0], item); in add_tree_ref() 120 btrfs_set_extent_refs(path->nodes[0], item, refs + 1); in add_tree_ref() 178 u64 refs; in remove_extent_ref() local 203 refs = btrfs_extent_refs(path->nodes[0], item); in remove_extent_ref() 204 btrfs_set_extent_refs(path->nodes[0], item, refs - 1); in remove_extent_ref()
|
/linux-4.4.14/drivers/misc/sgi-xp/ |
D | xpc.h | 956 s32 refs = atomic_dec_return(&ch->references); in xpc_msgqueue_deref() local 958 DBUG_ON(refs < 0); in xpc_msgqueue_deref() 959 if (refs == 0) in xpc_msgqueue_deref() 973 s32 refs = atomic_dec_return(&part->references); in xpc_part_deref() local 975 DBUG_ON(refs < 0); in xpc_part_deref() 976 if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN) in xpc_part_deref()
|
/linux-4.4.14/tools/perf/arch/x86/util/ |
D | intel-bts.c | 284 struct intel_bts_snapshot_ref *refs; in intel_bts_alloc_snapshot_refs() local 292 refs = calloc(new_cnt, sz); in intel_bts_alloc_snapshot_refs() 293 if (!refs) in intel_bts_alloc_snapshot_refs() 296 memcpy(refs, btsr->snapshot_refs, cnt * sz); in intel_bts_alloc_snapshot_refs() 298 btsr->snapshot_refs = refs; in intel_bts_alloc_snapshot_refs()
|
D | intel-pt.c | 750 struct intel_pt_snapshot_ref *refs; in intel_pt_alloc_snapshot_refs() local 758 refs = calloc(new_cnt, sz); in intel_pt_alloc_snapshot_refs() 759 if (!refs) in intel_pt_alloc_snapshot_refs() 762 memcpy(refs, ptr->snapshot_refs, cnt * sz); in intel_pt_alloc_snapshot_refs() 764 ptr->snapshot_refs = refs; in intel_pt_alloc_snapshot_refs()
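intel_pt_alloc_snapshot_refs() (and the intel-bts twin above) grows the snapshot_refs array by calloc()ing the larger size, memcpy()ing the old entries across, and swapping the pointer, so the new tail arrives zeroed. A compact sketch of that idiom with made-up names; note the sketch also frees the old array, which the perf code may handle differently:

#include <stdlib.h>
#include <string.h>

struct snapshot_ref { void *data; size_t size; };

static int grow_refs(struct snapshot_ref **slot, int cnt, int new_cnt)
{
    size_t sz = sizeof(**slot);
    struct snapshot_ref *refs = calloc(new_cnt, sz);

    if (!refs)
        return -1;                  /* old array left untouched on failure */

    memcpy(refs, *slot, cnt * sz);  /* keep the existing entries */
    free(*slot);
    *slot = refs;                   /* new tail is already zeroed */
    return 0;
}

int main(void)
{
    struct snapshot_ref *refs = calloc(4, sizeof(*refs));

    if (!refs || grow_refs(&refs, 4, 8))
        return 1;
    free(refs);
    return 0;
}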
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
D | port.c | 63 table->refs[i] = 0; in mlx4_init_mac_table() 76 table->refs[i] = 0; in mlx4_init_vlan_table() 110 if (table->refs[i] && in find_index() 149 if (!table->refs[i]) in mlx4_find_cached_mac() 174 if (!table->refs[i]) { in __mlx4_register_mac() 184 ++table->refs[i]; in __mlx4_register_mac() 207 table->refs[free] = 1; in __mlx4_register_mac() 270 if (--table->refs[index]) { in __mlx4_unregister_mac() 364 if (table->refs[i] && in mlx4_find_cached_vlan() 393 if (free < 0 && (table->refs[i] == 0)) { in __mlx4_register_vlan() [all …]
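The mlx4 MAC/VLAN tables pair each entry with a refs slot: registering an address reuses a live matching entry by bumping refs[i], otherwise claims the first free slot (refs[i] == 0), and unregistering only clears the entry once the count returns to zero. A small self-contained sketch of that refcounted table, with illustrative names and sizes:

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 8

struct addr_table {
    uint64_t entries[TABLE_SIZE];
    int refs[TABLE_SIZE];
};

static int table_register(struct addr_table *t, uint64_t addr)
{
    int i, free_slot = -1;

    for (i = 0; i < TABLE_SIZE; i++) {
        if (t->refs[i] && t->entries[i] == addr) {
            ++t->refs[i];           /* already present: share the slot */
            return i;
        }
        if (free_slot < 0 && t->refs[i] == 0)
            free_slot = i;
    }
    if (free_slot < 0)
        return -1;                  /* table full */
    t->entries[free_slot] = addr;
    t->refs[free_slot] = 1;
    return free_slot;
}

static void table_unregister(struct addr_table *t, int index)
{
    if (--t->refs[index])
        return;                     /* still in use elsewhere */
    t->entries[index] = 0;          /* last user: release the slot */
}

int main(void)
{
    struct addr_table t = { { 0 }, { 0 } };
    int a = table_register(&t, 0xaabbccddeeffULL);
    int b = table_register(&t, 0xaabbccddeeffULL);

    printf("same slot reused: %s\n", a == b ? "yes" : "no");
    table_unregister(&t, b);
    table_unregister(&t, a);
    return 0;
}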
|
D | mlx4.h | 738 int refs[MLX4_MAX_MAC_NUM]; member 760 int refs[MLX4_MAX_VLAN_NUM]; member
|
/linux-4.4.14/drivers/staging/most/mostcore/ |
D | core.c | 43 int refs; member 1249 if (c->aim0.refs && c->aim0.ptr->tx_completion) in arm_mbo() 1252 if (c->aim1.refs && c->aim1.ptr->tx_completion) in arm_mbo() 1426 if (c->aim0.refs && c->aim1.refs && in most_get_mbo() 1509 if (c->aim0.refs && c->aim0.ptr->rx_completion && in most_read_completion() 1513 if (c->aim1.refs && c->aim1.ptr->rx_completion && in most_read_completion() 1541 if (c->aim0.refs + c->aim1.refs > 0) in most_start_channel() 1583 c->aim0.refs++; in most_start_channel() 1585 c->aim1.refs++; in most_start_channel() 1617 if (c->aim0.refs + c->aim1.refs >= 2) in most_stop_channel() [all …]
|
/linux-4.4.14/arch/powerpc/mm/ |
D | hugetlbpage.c | 1076 int refs; in gup_hugepte() local 1093 refs = 0; in gup_hugepte() 1103 refs++; in gup_hugepte() 1106 if (!page_cache_add_speculative(head, refs)) { in gup_hugepte() 1107 *nr -= refs; in gup_hugepte() 1113 *nr -= refs; in gup_hugepte() 1114 while (refs--) in gup_hugepte() 1123 while (refs--) { in gup_hugepte()
|
/linux-4.4.14/include/trace/events/ |
D | btrfs.h | 185 __field( int, refs ) 197 __entry->refs = atomic_read(&map->refs); 212 __entry->refs, __entry->compress_type) 244 __field( int, refs ) 257 __entry->refs = atomic_read(&ordered->refs); 274 __entry->compress_type, __entry->refs) 693 __field( int, refs ) 702 __entry->refs = atomic_read(&buf->refs); 711 __entry->refs,
|
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/core/ |
D | event.h | 16 int *refs; member
|
/linux-4.4.14/include/uapi/xen/ |
D | gntdev.h | 62 struct ioctl_gntdev_grant_ref refs[1]; member
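The gntdev ioctl struct above ends in a one-element refs[1] array used as a variable-length tail: userspace allocates sizeof(struct) plus (count - 1) extra elements, and the kernel side (see the gntdev.c hit further down) copy_from_user()s count entries starting at &u->refs. A sketch of the userspace allocation side only, with simplified stand-in types rather than the real uapi layout:

#include <stdint.h>
#include <stdlib.h>

struct grant_ref { uint32_t domid; uint32_t ref; };

struct map_grant_ref {
    uint32_t count;             /* IN: number of refs that follow */
    uint64_t index;             /* OUT: offset returned by the driver */
    struct grant_ref refs[1];   /* variable-length tail, pre-C99 style */
};

static struct map_grant_ref *alloc_map_args(uint32_t count)
{
    struct map_grant_ref *u;

    /* One element already lives inside the struct, so add count - 1 more. */
    u = calloc(1, sizeof(*u) + (count - 1) * sizeof(u->refs[0]));
    if (!u)
        return NULL;
    u->count = count;
    return u;
}

int main(void)
{
    struct map_grant_ref *u = alloc_map_args(4);
    uint32_t i;

    if (!u)
        return 1;
    for (i = 0; i < u->count; i++)
        u->refs[i].ref = i;     /* fill in the grant references to map */
    /* ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, u) would go here. */
    free(u);
    return 0;
}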
|
/linux-4.4.14/Documentation/devicetree/bindings/hwmon/ |
D | g762.txt | 25 at http://natisbad.org/NAS/refs/GMT_EDS-762_763-080710-0.2.pdf.
|
/linux-4.4.14/drivers/staging/lustre/include/linux/libcfs/ |
D | libcfs_private.h | 363 void cfs_percpt_atomic_free(atomic_t **refs); 365 int cfs_percpt_atomic_summary(atomic_t **refs);
|
/linux-4.4.14/include/linux/ |
D | rmap.h | 258 #define try_to_unmap(page, refs) SWAP_FAIL argument
|
D | hugetlb.h | 35 struct kref refs; member
|
/linux-4.4.14/drivers/net/wireless/iwlwifi/mvm/ |
D | debugfs.c | 1220 if (mvm->refs[ref]) \ 1223 BIT(ref), mvm->refs[ref], #ref); \ 1234 u32 refs = 0; in iwl_dbgfs_d0i3_refs_read() local 1237 if (mvm->refs[i]) in iwl_dbgfs_d0i3_refs_read() 1238 refs |= BIT(i); in iwl_dbgfs_d0i3_refs_read() 1241 refs); in iwl_dbgfs_d0i3_refs_read() 1282 taken = mvm->refs[IWL_MVM_REF_USER]; in iwl_dbgfs_d0i3_refs_write()
|
D | mac80211.c | 219 mvm->refs[ref_type]++; in iwl_mvm_ref() 231 WARN_ON(!mvm->refs[ref_type]--); in iwl_mvm_unref() 246 if (except_ref == i || !mvm->refs[i]) in iwl_mvm_unref_all_except() 250 i, mvm->refs[i]); in iwl_mvm_unref_all_except() 251 for (j = 0; j < mvm->refs[i]; j++) in iwl_mvm_unref_all_except() 253 mvm->refs[i] = 0; in iwl_mvm_unref_all_except() 268 if (mvm->refs[i]) { in iwl_mvm_ref_taken()
|
D | mvm.h | 712 u8 refs[IWL_MVM_REF_COUNT]; member
|
D | ops.c | 595 mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1; in iwl_op_mode_mvm_start()
|
/linux-4.4.14/fs/ncpfs/ |
D | sock.c | 61 atomic_t refs; member 83 atomic_set(&req->refs, (1)); in ncp_alloc_req() 91 atomic_inc(&req->refs); in ncp_req_get() 96 if (atomic_dec_and_test(&req->refs)) in ncp_req_put()
|
/linux-4.4.14/kernel/ |
D | audit_tree.c | 29 atomic_long_t refs; member 122 if (atomic_long_dec_and_test(&chunk->refs)) in audit_put_chunk() 152 atomic_long_set(&chunk->refs, 1); in alloc_chunk() 193 atomic_long_inc(&p->refs); in audit_tree_lookup()
|
/linux-4.4.14/tools/vm/ |
D | slabinfo.c | 30 int refs; member 1092 s->refs++; in link_slabs() 1110 if (!show_single_ref && a->slab->refs == 1) in alias() 1140 if (s->refs > 1 && !show_first_alias) in rename_slabs() 1197 slab->refs = 0; in read_slab_dir()
|
/linux-4.4.14/drivers/staging/lustre/lustre/obdclass/ |
D | obd_config.c | 494 int refs; in class_decref() local 498 refs = atomic_read(&obd->obd_refcount); in class_decref() 502 CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs); in class_decref() 504 if ((refs == 1) && obd->obd_stopping) { in class_decref() 517 if (refs == 0) { in class_decref()
|
/linux-4.4.14/drivers/android/ |
D | binder.c | 234 struct hlist_head refs; member 973 if (node->local_weak_refs || !hlist_empty(&node->refs)) in binder_dec_node() 982 if (hlist_empty(&node->refs) && !node->local_strong_refs && in binder_dec_node() 1076 hlist_add_head(&new_ref->node_entry, &node->refs); in binder_get_ref_for_node() 2270 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; in binder_thread_read() 3019 static int binder_node_release(struct binder_node *node, int refs) in binder_node_release() argument 3027 if (hlist_empty(&node->refs)) { in binder_node_release() 3031 return refs; in binder_node_release() 3039 hlist_for_each_entry(ref, &node->refs, node_entry) { in binder_node_release() 3040 refs++; in binder_node_release() [all …]
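The binder.c hits show a node keeping an hlist of its references (node->refs): a ref hooks itself onto the list in binder_get_ref_for_node(), the node is only torn down once that list is empty and the local counts are zero, and binder_node_release() walks the list to count holders that still have to let go. A much-reduced sketch of that node-with-a-list-of-back-references shape, using a plain singly linked list and illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct node;

struct ref {
    struct ref *next;       /* stand-in for the hlist node_entry */
    struct node *node;
};

struct node {
    struct ref *refs;       /* head of the list of back-references */
    int local_refs;
};

static struct ref *ref_get_for_node(struct node *node)
{
    struct ref *ref = calloc(1, sizeof(*ref));

    if (!ref)
        return NULL;
    ref->node = node;
    ref->next = node->refs; /* hlist_add_head() equivalent */
    node->refs = ref;
    return ref;
}

static int node_release(struct node *node)
{
    struct ref *ref;
    int refs = 0;

    if (!node->refs && !node->local_refs) {
        free(node);         /* nobody left, free immediately */
        return 0;
    }
    for (ref = node->refs; ref; ref = ref->next)
        refs++;             /* holders that must still drop their refs */
    return refs;
}

int main(void)
{
    struct node *node = calloc(1, sizeof(*node));

    if (!node || !ref_get_for_node(node))
        return 1;
    printf("outstanding refs at release: %d\n", node_release(node));
    free(node->refs);
    free(node);
    return 0;
}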
|
/linux-4.4.14/drivers/tty/ |
D | tty_io.c | 618 int refs = 0; in tty_signal_session_leader() local 629 refs++; in tty_signal_session_leader() 654 return refs; in tty_signal_session_leader() 685 int refs; in __tty_hangup() local 724 refs = tty_signal_session_leader(tty, exit_session); in __tty_hangup() 726 while (refs--) in __tty_hangup()
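__tty_hangup() has tty_signal_session_leader() count how many tty references it takes while signalling processes, then drops exactly that many with tty_kref_put() afterwards. A trivial sketch of that remember-the-count-and-drop-it-later bookkeeping, with illustrative code only:

#include <stdio.h>

static int grab_refs_while_signalling(int nprocs, int *object_refcount)
{
    int refs = 0, i;

    for (i = 0; i < nprocs; i++) {
        (*object_refcount)++;   /* each signalled process pins the tty */
        refs++;
    }
    return refs;                /* caller must drop exactly this many */
}

int main(void)
{
    int tty_refcount = 1;
    int refs = grab_refs_while_signalling(3, &tty_refcount);

    while (refs--)
        tty_refcount--;         /* tty_kref_put() equivalent */

    printf("refcount back to %d\n", tty_refcount);
    return 0;
}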
|
/linux-4.4.14/tools/perf/util/ |
D | parse-events.l | 264 refs|Reference|ops|access |
|
/linux-4.4.14/fs/ocfs2/dlm/ |
D | dlmcommon.h | 297 struct kref refs; member 877 kref_get(&res->refs); in dlm_lockres_get()
|
D | dlmdebug.c | 109 res->last_used, atomic_read(&res->refs.refcount), in __dlm_print_one_lock_resource() 544 atomic_read(&res->refs.refcount)); in dump_lockres()
|
D | dlmmaster.c | 491 res = container_of(kref, struct dlm_lock_resource, refs); in dlm_lockres_release() 542 kref_put(&res->refs, dlm_lockres_release); in dlm_lockres_put() 578 kref_init(&res->refs); in dlm_init_lockres()
|
/linux-4.4.14/fs/hugetlbfs/ |
D | inode.c | 451 resv_map_release(&resv_map->refs); in hugetlbfs_evict_inode() 770 kref_put(&resv_map->refs, resv_map_release); in hugetlbfs_get_inode()
|
/linux-4.4.14/drivers/xen/ |
D | gntdev.c | 610 if (copy_from_user(map->grants, &u->refs, in gntdev_ioctl_map_grant_ref()
|
/linux-4.4.14/Documentation/development-process/ |
D | 7.AdvancedTopics | 40 own terminology and concepts; a new user of git should know about refs,
|
/linux-4.4.14/Documentation/filesystems/caching/ |
D | backend-api.txt | 187 backend to do the operation. The backend may get extra refs to it by 188 calling fscache_get_retrieval() and refs may be discarded by calling
|
/linux-4.4.14/drivers/block/drbd/ |
D | drbd_main.c | 2881 int refs = 3; in drbd_delete_device() local 2889 refs++; in drbd_delete_device() 2895 kref_sub(&device->kref, refs, drbd_destroy_device); in drbd_delete_device()
|
/linux-4.4.14/Documentation/networking/ |
D | rxrpc.txt | 797 of. It is possible to get extra refs on all types of message for later
|
/linux-4.4.14/Documentation/filesystems/ |
D | vfs.txt | 1038 the case of failure. The vfsmount should be returned with 2 refs on
|
/linux-4.4.14/Documentation/virtual/uml/ |
D | UserModeLinux-HOWTO.txt | 2729 nsyms = 57, ndeps = 0, syms = 0xa9023170, deps = 0x0, refs = 0x0,
|